"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = RoCBertTokenizer
__UpperCAmelCase : Any = None
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : List[Any] = True
__UpperCAmelCase : str = filter_non_english
def __UpperCAmelCase ( self ):
super().setUp()
__a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''你''', '''好''', '''是''', '''谁''', '''a''', '''b''', '''c''', '''d''']
__a = {}
__a = {}
for i, value in enumerate(_a ):
__a = i
__a = i
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_shape_file'''] )
__a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''word_pronunciation_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
with open(self.word_shape_file , '''w''' , encoding='''utf-8''' ) as word_shape_writer:
json.dump(_a , _a , ensure_ascii=_a )
with open(self.word_pronunciation_file , '''w''' , encoding='''utf-8''' ) as word_pronunciation_writer:
json.dump(_a , _a , ensure_ascii=_a )
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__a = tokenizer.tokenize('''你好[SEP]你是谁''' )
self.assertListEqual(_a , ['''你''', '''好''', '''[SEP]''', '''你''', '''是''', '''谁'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_a ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_a ) , [5, 6, 2, 5, 7, 8] )
def __UpperCAmelCase ( self ):
__a = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __UpperCAmelCase ( self ):
__a = RoCBertBasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def __UpperCAmelCase ( self ):
__a = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = RoCBertBasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def __UpperCAmelCase ( self ):
__a = RoCBertBasicTokenizer(do_lower_case=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = RoCBertBasicTokenizer(do_lower_case=_a , strip_accents=_a )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __UpperCAmelCase ( self ):
__a = RoCBertBasicTokenizer(do_lower_case=_a , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __UpperCAmelCase ( self ):
__a = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__a = {}
for i, token in enumerate(_a ):
__a = i
__a = RoCBertWordpieceTokenizer(vocab=_a , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __UpperCAmelCase ( self ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __UpperCAmelCase ( self ):
__a = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
if self.test_rust_tokenizer:
__a = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(_a ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
def __UpperCAmelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__a = tokenizer_r.encode_plus(
_a , return_attention_mask=_a , return_token_type_ids=_a , return_offsets_mapping=_a , add_special_tokens=_a , )
__a = tokenizer_r.do_lower_case if hasattr(_a , '''do_lower_case''' ) else False
__a = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def __UpperCAmelCase ( self ):
__a = ['''的''', '''人''', '''有''']
__a = ''''''.join(_a )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__a = True
__a = self.tokenizer_class.from_pretrained(_a , **_a )
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = tokenizer_p.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.convert_ids_to_tokens(_a )
__a = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
__a = False
__a = self.rust_tokenizer_class.from_pretrained(_a , **_a )
__a = self.tokenizer_class.from_pretrained(_a , **_a )
__a = tokenizer_r.encode(_a , add_special_tokens=_a )
__a = tokenizer_p.encode(_a , add_special_tokens=_a )
__a = tokenizer_r.convert_ids_to_tokens(_a )
__a = tokenizer_p.convert_ids_to_tokens(_a )
# it is expected that only the first Chinese character is not preceded by "##".
__a = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(_a )
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , _a )
@slow
def __UpperCAmelCase ( self ):
__a = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__a = tokenizer.encode('''你好''' , add_special_tokens=_a )
__a = tokenizer.encode('''你是谁''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __UpperCAmelCase ( self ):
__a = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
__a = '''你好,你是谁'''
__a = tokenizer.tokenize(_a )
__a = tokenizer.convert_tokens_to_ids(_a )
__a = tokenizer.convert_tokens_to_shape_ids(_a )
__a = tokenizer.convert_tokens_to_pronunciation_ids(_a )
__a = tokenizer.prepare_for_model(
_a , _a , _a , add_special_tokens=_a )
__a = tokenizer.encode_plus(_a , add_special_tokens=_a )
self.assertEqual(_a , _a )
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
lowercase_ = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
lowercase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = ['input_ids', 'attention_mask']
__UpperCAmelCase : Optional[Any] = MBartTokenizer
__UpperCAmelCase : List[int] = []
__UpperCAmelCase : List[int] = []
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , **_a , )
__a = vocab_file
__a = False if not self.vocab_file else True
__a = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__a = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__a = src_lang if src_lang is not None else '''en_XX'''
__a = self.convert_tokens_to_ids(self._src_lang )
__a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _a ):
__a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a , _a , _a , **_a ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a = src_lang
__a = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
__a = self.convert_tokens_to_ids(_a )
__a = tgt_lang_id
return inputs
def __UpperCAmelCase ( self , _a , _a = "en_XX" , _a = None , _a = "ro_RO" , **_a , ):
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def __UpperCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
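
# --- Usage sketch (added; a hedged illustration, not part of the original module) ---
# Driving the tokenizer above for en->ro translation. Setting src_lang goes through
# the property setter, which re-installs the post-processor so every encoded
# sequence ends with [eos, language code].
#
#     from transformers import MBartTokenizerFast
#
#     tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro")
#     tok.src_lang = "en_XX"
#     batch = tok("Hello, world!", return_tensors="pt")
#     # batch["input_ids"] ends with tok.eos_token_id followed by tok.lang_code_to_id["en_XX"]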
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : list , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : int ) -> list:
__a = []
__a , __a = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
__a = result + left + right
return input_list
def lowercase ( lowerCAmelCase__ : list ) -> list:
if len(lowerCAmelCase__ ) <= 1:
return input_list
__a = list(lowerCAmelCase__ )
# iteration for two-way merging
__a = 2
while p <= len(lowerCAmelCase__ ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
__a = i
__a = i + p - 1
__a = (low + high + 1) // 2
__a = merge(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# final merge of last two parts
if p * 2 >= len(lowerCAmelCase__ ):
__a = i
__a = merge(lowerCAmelCase__ , 0 , lowerCAmelCase__ , len(lowerCAmelCase__ ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
lowercase_ = input("Enter numbers separated by a comma:\n").strip()
if user_input == "":
lowercase_ = []
else:
lowercase_ = [int(item.strip()) for item in user_input.split(",")]
print(iter_merge_sort(unsorted))
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase_ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = load_tool('''text-question-answering''' )
self.tool.setup()
__a = load_tool('''text-question-answering''' , remote=_a )
def __UpperCAmelCase ( self ):
__a = self.tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


logger = logging.getLogger(__name__)

MODEL_BIN_FILE = "pytorch_model.bin"


@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we are going to fine-tune."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training, evaluation and inference."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "The name of the task to train on."},
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments pertaining to the self-training loop itself."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]'
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Maximum number of self-training iterations."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None,
        metadata={"help": "Random seed for initialization."},
    )


def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)


def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Self-train a pre-trained model on a downstream task."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
        os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{best_iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
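
# --- Usage note (added; a hedged illustration, not part of the original module) ---
# With _LazyModule installed in sys.modules, importing this package is cheap:
# submodules such as modeling_blip_2 (and their torch dependency) are only
# imported the first time one of the names in _import_structure is accessed, e.g.:
#
#     from transformers import Blip2Processor, Blip2ForConditionalGeneration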
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def lowercase ( ) -> Dict:
__a = HfArgumentParser(lowerCAmelCase__ )
__a = parser.parse_args_into_dataclasses()[0]
__a = TensorFlowBenchmark(args=lowerCAmelCase__ )
try:
__a = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
__a = '''Arg --no_{0} is no longer used, please use --no-{0} instead.'''
__a = ''' '''.join(str(lowerCAmelCase__ ).split(''' ''' )[:-1] )
__a = ''''''
__a = eval(str(lowerCAmelCase__ ).split(''' ''' )[-1] )
__a = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(lowerCAmelCase__ )
if len(lowerCAmelCase__ ) > 0:
__a = full_error_msg + begin_error_msg + str(lowerCAmelCase__ )
raise ValueError(lowerCAmelCase__ )
benchmark.run()
if __name__ == "__main__":
main()
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[list[str]] , lowerCAmelCase__ : int , ) -> None:
__a = len(lowerCAmelCase__ )
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(lowerCAmelCase__ ):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , lowerCAmelCase__ , lowerCAmelCase__ , )
def lowercase ( lowerCAmelCase__ : int ) -> None:
__a = []
depth_first_search([] , [] , [] , lowerCAmelCase__ , lowerCAmelCase__ )
# Print all the boards
for board in boards:
for column in board:
print(lowerCAmelCase__ )
print('''''' )
print(len(lowerCAmelCase__ ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Dict = XGLMTokenizer
__UpperCAmelCase : Any = XGLMTokenizerFast
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : str = True
def __UpperCAmelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__a = XGLMTokenizer(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self ):
__a = '''<pad>'''
__a = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __UpperCAmelCase ( self ):
__a = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<s>''' )
self.assertEqual(vocab_keys[1] , '''<pad>''' )
self.assertEqual(len(_a ) , 1_008 )
def __UpperCAmelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_008 )
def __UpperCAmelCase ( self ):
__a = XGLMTokenizer(_a , keep_accents=_a )
__a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
@cached_property
def __UpperCAmelCase ( self ):
return XGLMTokenizer.from_pretrained('''facebook/xglm-564M''' )
def __UpperCAmelCase ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_a , f.name )
__a = XGLMTokenizer(f.name , keep_accents=_a )
__a = pickle.dumps(_a )
pickle.loads(_a )
def __UpperCAmelCase ( self ):
if not self.test_rust_tokenizer:
return
__a = self.get_tokenizer()
__a = self.get_rust_tokenizer()
__a = '''I was born in 92000, and this is falsé.'''
__a = tokenizer.tokenize(_a )
__a = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
__a = tokenizer.encode(_a , add_special_tokens=_a )
__a = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
__a = self.get_rust_tokenizer()
__a = tokenizer.encode(_a )
__a = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
@slow
def __UpperCAmelCase ( self ):
__a = '''Hello World!'''
__a = [2, 31_227, 4_447, 35]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __UpperCAmelCase ( self ):
__a = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'''
)
# fmt: off
__a = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __UpperCAmelCase ( self ):
# fmt: off
__a = {
'''input_ids''': [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]],
'''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/xglm-564M",
            padding=False,
        )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
__UpperCAmelCase : ClassVar[Features] = Features({'audio': Audio()} )
__UpperCAmelCase : ClassVar[Features] = Features({'transcription': Value('string' )} )
__UpperCAmelCase : str = "audio"
__UpperCAmelCase : str = "transcription"
def __UpperCAmelCase ( self , _a ):
if self.audio_column not in features:
raise ValueError(f'''Column {self.audio_column} is not present in features.''' )
if not isinstance(features[self.audio_column] , _a ):
raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' )
__a = copy.deepcopy(self )
__a = self.input_schema.copy()
__a = features[self.audio_column]
__a = input_schema
return task_template
@property
def __UpperCAmelCase ( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
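
# --- Usage sketch (added; a hedged illustration, not part of the original file) ---
# align_with_features swaps the template's generic Audio() feature for the
# dataset's concrete one (e.g. its sampling rate). The dataset and column names
# below are assumptions chosen for illustration.
#
#     from datasets import load_dataset
#
#     ds = load_dataset("librispeech_asr", "clean", split="validation")
#     template = AutomaticSpeechRecognition(audio_column="audio", transcription_column="text")
#     template = template.align_with_features(ds.features)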
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'vit_mae'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=224 , _a=16 , _a=3 , _a=True , _a=16 , _a=512 , _a=8 , _a=2_048 , _a=0.75 , _a=False , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = decoder_num_attention_heads
__a = decoder_hidden_size
__a = decoder_num_hidden_layers
__a = decoder_intermediate_size
__a = mask_ratio
__a = norm_pix_loss
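
# --- Usage sketch (added; a hedged illustration, not part of the original module) ---
# A config instance carries the encoder/decoder hyperparameters above; mask_ratio
# controls the fraction of patches hidden during MAE pre-training.
#
#     config = ViTMAEConfig(mask_ratio=0.9)   # mask 90% of patches instead of the default 75%
#     # model = ViTMAEForPreTraining(config)  # model class assumed to live in the same package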
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : Dict ) -> int:
__a = len(lowerCAmelCase__ )
__a = sum(lowerCAmelCase__ )
__a = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
__a = True
for i in range(1 , s + 1 ):
__a = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
__a = dp[i][j - 1]
if arr[i - 1] <= j:
__a = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
__a = s - 2 * j
break
return diff
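
# --- Usage example (added for illustration) ---
if __name__ == "__main__":
    # [1, 6, 11, 5] splits into {1, 5, 6} and {11}, so the minimum difference is |12 - 11| = 1
    print(find_min([1, 6, 11, 5]))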
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['image_processor', 'tokenizer']
__UpperCAmelCase : str = 'LayoutLMv3ImageProcessor'
__UpperCAmelCase : Optional[int] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , _a=None , _a=None , **_a ):
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
__a = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_a , _a ):
__a = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a = features['''words''']
__a = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
__a = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
__a = images
return encoded_inputs
def __UpperCAmelCase ( self , _a , _a ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(_a )} and {len(_a )}''' )
return images_with_overflow
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def __UpperCAmelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
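
# --- Usage sketch (added; a hedged illustration, not part of the original module) ---
# When the image processor runs OCR (apply_ocr=True), the processor feeds the
# detected words and boxes straight into the tokenizer. The file path below is a
# placeholder.
#
#     from PIL import Image
#     from transformers import LayoutLMv3Processor
#
#     processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#     image = Image.open("document.png").convert("RGB")
#     encoding = processor(image, return_tensors="pt")
#     # encoding contains input_ids, bbox, attention_mask and pixel_values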
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def lowercase ( lowerCAmelCase__ : str = "https://www.worldometers.info/coronavirus" ) -> dict:
__a = BeautifulSoup(requests.get(lowerCAmelCase__ ).text , '''html.parser''' )
__a = soup.findAll('''h1''' )
__a = soup.findAll('''div''' , {'''class''': '''maincounter-number'''} )
keys += soup.findAll('''span''' , {'''class''': '''panel-title'''} )
values += soup.findAll('''div''' , {'''class''': '''number-table-main'''} )
return {key.text.strip(): value.text.strip() for key, value in zip(lowerCAmelCase__ , lowerCAmelCase__ )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(F'''{key}\n{value}\n''')
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]=0.9_99 , lowerCAmelCase__ : List[str]="cosine" , ) -> Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCAmelCase__ : int ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCAmelCase__ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )
__a = []
for i in range(lowerCAmelCase__ ):
__a = i / num_diffusion_timesteps
__a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCAmelCase__ ) / alpha_bar_fn(lowerCAmelCase__ ) , lowerCAmelCase__ ) )
return torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : str = 2
@register_to_config
def __init__( self , _a = 1_000 , _a = 0.0_0085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = "linspace" , _a = 0 , ):
if trained_betas is not None:
__a = torch.tensor(_a , dtype=torch.floataa )
elif beta_schedule == "linear":
__a = torch.linspace(_a , _a , _a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__a = betas_for_alpha_bar(_a )
else:
raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )
__a = 1.0 - self.betas
__a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_a , _a , _a )
def __UpperCAmelCase ( self , _a , _a=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(_a ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _a , _a , ):
__a = self.index_for_timestep(_a )
if self.state_in_first_order:
__a = self.sigmas[step_index]
else:
__a = self.sigmas_interpol[step_index]
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __UpperCAmelCase ( self , _a , _a = None , _a = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = torch.from_numpy(np.log(_a ) ).to(_a )
__a = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(_a ).to(device=_a )
# interpolate sigmas
__a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_a ).startswith('''mps''' ):
# mps does not support float64
__a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa )
else:
__a = torch.from_numpy(_a ).to(_a )
# interpolate timesteps
__a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype )
__a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__a = torch.cat([timesteps[:1], interleaved_timesteps] )
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(_a )
def __UpperCAmelCase ( self , _a ):
# get log sigma
__a = sigma.log()
# get distribution
__a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = self.log_sigmas[low_idx]
__a = self.log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = w.clamp(0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ):
return self.sample is None
def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ):
__a = self.index_for_timestep(_a )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas_interpol[step_index + 1]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas_interpol[step_index]
__a = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_interpol - sigma_hat
# store for 2nd order step
__a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__a = sigma_next - sigma_hat
__a = self.sample
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __UpperCAmelCase ( self , _a , _a , _a , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(_a , _a ) for t in timesteps]
__a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
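# --- usage sketch (illustrative only) -----------------------------------------
# A minimal denoising loop for the scheduler above, assuming it is the
# KDPM2DiscreteScheduler exported by diffusers (the class name is mangled here).
# The zero tensor stands in for a real UNet's epsilon prediction, so the output
# is meaningless, but the control flow -- two internal phases (first-order,
# then second-order) per visible step -- is exercised for real.
if __name__ == "__main__":
    import torch
    from diffusers import KDPM2DiscreteScheduler

    scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1_000, beta_schedule="linear")
    scheduler.set_timesteps(25, device="cpu")
    sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = torch.zeros_like(model_input)  # stand-in for unet(model_input, t).sample
        sample = scheduler.step(noise_pred, t, sample).prev_sample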
| 11 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 366 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'rwkv'
__UpperCAmelCase : Optional[Any] = {'max_position_embeddings': 'context_length'}
def __init__( self , _a=50_277 , _a=1_024 , _a=4_096 , _a=32 , _a=None , _a=None , _a=1E-5 , _a=0 , _a=0 , _a=6 , _a=False , _a=True , **_a , ):
__a = vocab_size
__a = context_length
__a = hidden_size
__a = num_hidden_layers
__a = attention_hidden_size if attention_hidden_size is not None else hidden_size
__a = intermediate_size if intermediate_size is not None else 4 * hidden_size
__a = layer_norm_epsilon
__a = rescale_every
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
tie_word_embeddings=_a , bos_token_id=_a , eos_token_id=_a , **_a )
| 11 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/nllb-200-distilled-600M": (
"https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"
),
},
}
lowercase_ = {
"facebook/nllb-large-en-ro": 1_0_2_4,
"facebook/nllb-200-distilled-600M": 1_0_2_4,
}
# fmt: off
lowercase_ = ["ace_Arab", "ace_Latn", "acm_Arab", "acq_Arab", "aeb_Arab", "afr_Latn", "ajp_Arab", "aka_Latn", "amh_Ethi", "apc_Arab", "arb_Arab", "ars_Arab", "ary_Arab", "arz_Arab", "asm_Beng", "ast_Latn", "awa_Deva", "ayr_Latn", "azb_Arab", "azj_Latn", "bak_Cyrl", "bam_Latn", "ban_Latn", "bel_Cyrl", "bem_Latn", "ben_Beng", "bho_Deva", "bjn_Arab", "bjn_Latn", "bod_Tibt", "bos_Latn", "bug_Latn", "bul_Cyrl", "cat_Latn", "ceb_Latn", "ces_Latn", "cjk_Latn", "ckb_Arab", "crh_Latn", "cym_Latn", "dan_Latn", "deu_Latn", "dik_Latn", "dyu_Latn", "dzo_Tibt", "ell_Grek", "eng_Latn", "epo_Latn", "est_Latn", "eus_Latn", "ewe_Latn", "fao_Latn", "pes_Arab", "fij_Latn", "fin_Latn", "fon_Latn", "fra_Latn", "fur_Latn", "fuv_Latn", "gla_Latn", "gle_Latn", "glg_Latn", "grn_Latn", "guj_Gujr", "hat_Latn", "hau_Latn", "heb_Hebr", "hin_Deva", "hne_Deva", "hrv_Latn", "hun_Latn", "hye_Armn", "ibo_Latn", "ilo_Latn", "ind_Latn", "isl_Latn", "ita_Latn", "jav_Latn", "jpn_Jpan", "kab_Latn", "kac_Latn", "kam_Latn", "kan_Knda", "kas_Arab", "kas_Deva", "kat_Geor", "knc_Arab", "knc_Latn", "kaz_Cyrl", "kbp_Latn", "kea_Latn", "khm_Khmr", "kik_Latn", "kin_Latn", "kir_Cyrl", "kmb_Latn", "kon_Latn", "kor_Hang", "kmr_Latn", "lao_Laoo", "lvs_Latn", "lij_Latn", "lim_Latn", "lin_Latn", "lit_Latn", "lmo_Latn", "ltg_Latn", "ltz_Latn", "lua_Latn", "lug_Latn", "luo_Latn", "lus_Latn", "mag_Deva", "mai_Deva", "mal_Mlym", "mar_Deva", "min_Latn", "mkd_Cyrl", "plt_Latn", "mlt_Latn", "mni_Beng", "khk_Cyrl", "mos_Latn", "mri_Latn", "zsm_Latn", "mya_Mymr", "nld_Latn", "nno_Latn", "nob_Latn", "npi_Deva", "nso_Latn", "nus_Latn", "nya_Latn", "oci_Latn", "gaz_Latn", "ory_Orya", "pag_Latn", "pan_Guru", "pap_Latn", "pol_Latn", "por_Latn", "prs_Arab", "pbt_Arab", "quy_Latn", "ron_Latn", "run_Latn", "rus_Cyrl", "sag_Latn", "san_Deva", "sat_Beng", "scn_Latn", "shn_Mymr", "sin_Sinh", "slk_Latn", "slv_Latn", "smo_Latn", "sna_Latn", "snd_Arab", "som_Latn", "sot_Latn", "spa_Latn", "als_Latn", "srd_Latn", "srp_Cyrl", "ssw_Latn", "sun_Latn", "swe_Latn", "swh_Latn", "szl_Latn", "tam_Taml", "tat_Cyrl", "tel_Telu", "tgk_Cyrl", "tgl_Latn", "tha_Thai", "tir_Ethi", "taq_Latn", "taq_Tfng", "tpi_Latn", "tsn_Latn", "tso_Latn", "tuk_Latn", "tum_Latn", "tur_Latn", "twi_Latn", "tzm_Tfng", "uig_Arab", "ukr_Cyrl", "umb_Latn", "urd_Arab", "uzn_Latn", "vec_Latn", "vie_Latn", "war_Latn", "wol_Latn", "xho_Latn", "ydd_Hebr", "yor_Latn", "yue_Hant", "zho_Hans", "zho_Hant", "zul_Latn"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = VOCAB_FILES_NAMES
__UpperCAmelCase : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : List[str] = ['input_ids', 'attention_mask']
__UpperCAmelCase : Any = NllbTokenizer
__UpperCAmelCase : List[int] = []
__UpperCAmelCase : List[int] = []
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , _a=False , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
__a = legacy_behaviour
super().__init__(
vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , legacy_behaviour=_a , **_a , )
__a = vocab_file
__a = False if not self.vocab_file else True
__a = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__a = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__a = src_lang if src_lang is not None else '''eng_Latn'''
__a = self.convert_tokens_to_ids(self._src_lang )
__a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _a ):
__a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a , _a , _a , **_a ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a = src_lang
__a = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
__a = self.convert_tokens_to_ids(_a )
__a = tgt_lang_id
return inputs
def __UpperCAmelCase ( self , _a , _a = "eng_Latn" , _a = None , _a = "fra_Latn" , **_a , ):
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def __UpperCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
else:
__a = [self.cur_lang_code]
__a = [self.eos_token_id]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
if self.legacy_behaviour:
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
else:
__a = [self.cur_lang_code]
__a = [self.eos_token_id]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
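# --- usage sketch (illustrative only) -----------------------------------------
# Assuming the class above is transformers' NllbTokenizerFast, translation
# inputs are prepared by pinning source/target language codes; with the default
# (non-legacy) behaviour the encoded source starts with the src_lang code token
# and ends with </s>.
if __name__ == "__main__":
    from transformers import NllbTokenizerFast

    tokenizer = NllbTokenizerFast.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    batch = tokenizer("Hello world", text_target="Bonjour le monde", return_tensors="pt")
    print(batch["input_ids"])
    print(batch["labels"])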
| 367 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (UnCLIPScheduler,)
def __UpperCAmelCase ( self , **_a ):
__a = {
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_a )
return config
def __UpperCAmelCase ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCAmelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_a )
def __UpperCAmelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __UpperCAmelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_a )
def __UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_a )
def __UpperCAmelCase ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_a , prev_timestep=_a )
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''fixed_small_log''' )
__a = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''learned_range''' )
__a = scheduler_class(**_a )
__a = 0.5
assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_a ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_a ) - -0.001_0011 < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
scheduler.set_timesteps(25 )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
if i + 1 == timesteps.shape[0]:
__a = None
else:
__a = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(
_a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
| 11 | 0 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
lowercase_ = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
lowercase_ = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
lowercase_ = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
lowercase_ = sorted(arg_to_scheduler.keys())
lowercase_ = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class __lowerCAmelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self , _a , _a=None , _a="base" , _a=None , _a=None , _a=None , **_a , ):
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_a )
__a = 0
__a = Path(self.hparams.output_dir )
__a = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__a = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'''num_labels''': num_labels} if num_labels is not None else {}) , cache_dir=_a , **_a , )
else:
__a = config
__a = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(self.hparams , _a , _a ):
assert hasattr(self.config , _a ), f'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , _a , getattr(self.hparams , _a ) )
if tokenizer is None:
__a = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_a , )
else:
__a = tokenizer
__a = MODEL_MODES[mode]
if model is None:
__a = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('''.ckpt''' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_a , )
else:
__a = model
def __UpperCAmelCase ( self , *_a , **_a ):
__a = self.model_type.from_pretrained(*_a , **_a )
def __UpperCAmelCase ( self ):
__a = arg_to_scheduler[self.hparams.lr_scheduler]
__a = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__a = {'''scheduler''': scheduler, '''interval''': '''step''', '''frequency''': 1}
return scheduler
def __UpperCAmelCase ( self ):
__a = self.model
__a = ['''bias''', '''LayerNorm.weight''']
__a = [
{
'''params''': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check these named parameters
'''weight_decay''': self.hparams.weight_decay,
},
{
'''params''': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.hparams.adafactor:
__a = Adafactor(
_a , lr=self.hparams.learning_rate , scale_parameter=_a , relative_step=_a )
else:
__a = AdamW(
_a , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__a = optimizer
__a = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __UpperCAmelCase ( self , _a , _a ):
return self.validation_step(_a , _a )
def __UpperCAmelCase ( self , _a ):
return self.validation_end(_a )
def __UpperCAmelCase ( self ):
__a = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__a = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __UpperCAmelCase ( self , _a ):
if stage == "test":
__a = len(self.test_dataloader().dataset )
else:
__a = self.get_dataloader('''train''' , self.hparams.train_batch_size , shuffle=_a )
__a = len(self.train_dataloader().dataset )
def __UpperCAmelCase ( self , _a , _a , _a = False ):
raise NotImplementedError('''You must implement this for your task''' )
def __UpperCAmelCase ( self ):
return self.train_loader
def __UpperCAmelCase ( self ):
return self.get_dataloader('''dev''' , self.hparams.eval_batch_size , shuffle=_a )
def __UpperCAmelCase ( self ):
return self.get_dataloader('''test''' , self.hparams.eval_batch_size , shuffle=_a )
def __UpperCAmelCase ( self , _a ):
return os.path.join(
self.hparams.data_dir , '''cached_{}_{}_{}'''.format(
_a , list(filter(_a , self.hparams.model_name_or_path.split('''/''' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __UpperCAmelCase ( self , _a ):
__a = self.output_dir.joinpath('''best_tfmr''' )
__a = self.step_count
self.model.save_pretrained(_a )
self.tokenizer.save_pretrained(_a )
@staticmethod
def __UpperCAmelCase ( _a , _a ):
parser.add_argument(
'''--model_name_or_path''' , default=_a , type=_a , required=_a , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--config_name''' , default='''''' , type=_a , help='''Pretrained config name or path if not the same as model_name''' )
parser.add_argument(
'''--tokenizer_name''' , default=_a , type=_a , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument(
'''--cache_dir''' , default=str(Path(_a ).parent / '''test_run''' / '''cache''' ) , type=_a , help='''Where do you want to store the pre-trained models downloaded from huggingface.co''' , )
parser.add_argument(
'''--encoder_layerdrop''' , type=_a , help='''Encoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--decoder_layerdrop''' , type=_a , help='''Decoder layer dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--dropout''' , type=_a , help='''Dropout probability (Optional). Goes into model.config''' , )
parser.add_argument(
'''--attention_dropout''' , type=_a , help='''Attention dropout probability (Optional). Goes into model.config''' , )
parser.add_argument('''--learning_rate''' , default=5E-5 , type=_a , help='''The initial learning rate for Adam.''' )
parser.add_argument(
'''--lr_scheduler''' , default='''linear''' , choices=_a , metavar=_a , type=_a , help='''Learning rate scheduler''' , )
parser.add_argument('''--weight_decay''' , default=0.0 , type=_a , help='''Weight decay if we apply some.''' )
parser.add_argument('''--adam_epsilon''' , default=1E-8 , type=_a , help='''Epsilon for Adam optimizer.''' )
parser.add_argument('''--warmup_steps''' , default=0 , type=_a , help='''Linear warmup over warmup_steps.''' )
parser.add_argument('''--num_workers''' , default=4 , type=_a , help='''kwarg passed to DataLoader''' )
parser.add_argument('''--num_train_epochs''' , dest='''max_epochs''' , default=3 , type=_a )
parser.add_argument('''--train_batch_size''' , default=32 , type=_a )
parser.add_argument('''--eval_batch_size''' , default=32 , type=_a )
parser.add_argument('''--adafactor''' , action='''store_true''' )
class __lowerCAmelCase ( pl.Callback ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a ):
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __lowerCAmelCase ( pl.Callback ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a ):
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_a )
class __lowerCAmelCase ( pl.Callback ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a ):
__a = trainer.lr_schedulers[0]['''scheduler''']
__a = {f'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_a )
def __UpperCAmelCase ( self , _a , _a ):
rank_zero_info('''***** Validation results *****''' )
__a = trainer.callback_metrics
# Log results
for key in sorted(_a ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(_a , str(metrics[key] ) ) )
def __UpperCAmelCase ( self , _a , _a ):
rank_zero_info('''***** Test results *****''' )
__a = trainer.callback_metrics
# Log and save results to file
__a = os.path.join(pl_module.hparams.output_dir , '''test_results.txt''' )
with open(_a , '''w''' ) as writer:
for key in sorted(_a ):
if key not in ["log", "progress_bar"]:
rank_zero_info('''{} = {}\n'''.format(_a , str(metrics[key] ) ) )
writer.write('''{} = {}\n'''.format(_a , str(metrics[key] ) ) )
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] ) -> None:
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'''--output_dir''' , default=str(Path(lowerCAmelCase__ ).parent / '''test_run''' / '''model_checkpoints''' ) , type=lowerCAmelCase__ , help='''The output directory where the model predictions and checkpoints will be written.''' , )
parser.add_argument(
'''--fp16''' , action='''store_true''' , help='''Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit''' , )
parser.add_argument(
'''--fp16_opt_level''' , type=lowerCAmelCase__ , default='''O2''' , help=(
'''For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'''
'''See details at https://nvidia.github.io/apex/amp.html'''
) , )
parser.add_argument('''--n_tpu_cores''' , dest='''tpu_cores''' , type=lowerCAmelCase__ )
parser.add_argument('''--max_grad_norm''' , dest='''gradient_clip_val''' , default=1.0 , type=lowerCAmelCase__ , help='''Max gradient norm''' )
parser.add_argument('''--do_train''' , action='''store_true''' , help='''Whether to run training.''' )
parser.add_argument('''--do_predict''' , action='''store_true''' , help='''Whether to run predictions on the test set.''' )
parser.add_argument(
'''--gradient_accumulation_steps''' , dest='''accumulate_grad_batches''' , type=lowerCAmelCase__ , default=1 , help='''Number of updates steps to accumulate before performing a backward/update pass.''' , )
parser.add_argument('''--seed''' , type=lowerCAmelCase__ , default=42 , help='''random seed for initialization''' )
parser.add_argument(
'''--data_dir''' , default=str(Path(lowerCAmelCase__ ).parent / '''test_run''' / '''dummy-train-data''' ) , type=lowerCAmelCase__ , help='''The input data dir. Should contain the training files for the CoNLL-2003 NER task.''' , )
def lowercase ( lowerCAmelCase__ : BaseTransformer , lowerCAmelCase__ : argparse.Namespace , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : List[Any]=True , lowerCAmelCase__ : int=[] , lowerCAmelCase__ : str=None , lowerCAmelCase__ : List[str]=None , **lowerCAmelCase__ : str , ) -> Optional[int]:
pl.seed_everything(args.seed )
# init model
__a = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCAmelCase__ )
# add custom checkpoints
if checkpoint_callback is None:
__a = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='''checkpoint''' , monitor='''val_loss''' , mode='''min''' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCAmelCase__ )
if logging_callback is None:
__a = LoggingCallback()
__a = {}
if args.fpaa:
__a = 16
if args.gpus > 1:
__a = '''auto'''
__a = '''ddp'''
__a = args.accumulate_grad_batches
__a = None
__a = '''auto'''
__a = pl.Trainer.from_argparse_args(
lowerCAmelCase__ , weights_summary=lowerCAmelCase__ , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase__ , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase__ , )
if args.do_train:
trainer.fit(lowerCAmelCase__ )
else:
print('''RAG modeling tests with new set functions successfully executed!''' )
return trainer
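# --- usage sketch (illustrative only) -----------------------------------------
# Typical wiring for the base module above, using the original lightning_base
# names for the two helpers that are both mangled to ``lowercase`` here
# (``add_generic_args`` and ``generic_train``); ``MyTaskModule`` is a
# hypothetical subclass that implements get_dataloader():
#
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     MyTaskModule.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args()
#     model = MyTaskModule(args)
#     trainer = generic_train(model, args)
#     if args.do_predict:
#         trainer.test(model)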
| 368 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowercase ( lowerCAmelCase__ : Dict ) -> Optional[int]:
__a , __a = image.size
__a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
__a = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_55.0
__a = image[None].transpose(0 , 3 , 1 , 2 )
__a = torch.from_numpy(lowerCAmelCase__ )
return 2.0 * image - 1.0
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , ):
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , PIL.Image.Image ):
__a = 1
elif isinstance(_a , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' )
if isinstance(_a , PIL.Image.Image ):
__a = preprocess(_a )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
__a = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(_a ).sample
__a = torch.clamp(_a , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
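# --- usage sketch (illustrative only) -----------------------------------------
# Assuming the pipeline above is diffusers' LDMSuperResolutionPipeline; the
# blank PIL image is a placeholder input (use a real low-resolution photo):
if __name__ == "__main__":
    from PIL import Image
    from diffusers import LDMSuperResolutionPipeline

    pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    low_res = Image.new("RGB", (128, 128))
    upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
    upscaled.save("ldm_generated_image.png")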
| 11 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=True , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=4 , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_attention_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_choices
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_attention_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = True
__a = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__a = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : int = True
__UpperCAmelCase : List[Any] = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __UpperCAmelCase ( self ):
__a = FlaxRobertaPreLayerNormModelTester(self )
@slow
def __UpperCAmelCase ( self ):
for model_class_name in self.all_model_classes:
__a = model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=_a )
__a = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=_a )
__a = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__a = model(_a )[0]
__a = [1, 11, 50_265]
self.assertEqual(list(output.shape ) , _a )
# compare the actual values for a slice.
__a = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=_a )
__a = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
__a = model(_a )[0]
# compare the actual values for a slice.
__a = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
| 369 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = DistilBertTokenizer
__UpperCAmelCase : Any = DistilBertTokenizerFast
__UpperCAmelCase : int = True
@slow
def __UpperCAmelCase ( self ):
__a = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 11 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 370 |
"""simple docstring"""
from math import factorial, radians
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : int = 18 , lowerCAmelCase__ : int = 10 ) -> float:
__a = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0)
# Converting from degrees to radians
__a = radians(lowerCAmelCase__ )
__a = angle_in_radians
__a = 3
__a = -1
for _ in range(lowerCAmelCase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase__ )
__a = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
__import__("doctest").testmod()
| 11 | 0 |
"""simple docstring"""
def lowercase ( lowerCAmelCase__ : int ) -> int:
if lowerCAmelCase__ < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(lowerCAmelCase__ , float ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(lowerCAmelCase__ ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
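# --- example (illustrative) -----------------------------------------------------
# The same popcount computed two ways, independent of the function name above:
if __name__ == "__main__":
    assert bin(25).count("1") == 3  # 25 == 0b11001
    assert (37).bit_count() == 3  # int.bit_count(), Python >= 3.10; 37 == 0b100101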
| 371 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowercase ( lowerCAmelCase__ : dict ) -> tuple:
return (data["data"], data["target"])
def lowercase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray ) -> np.ndarray:
__a = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCAmelCase__ , lowerCAmelCase__ )
# Predict target for test data
__a = xgb.predict(lowerCAmelCase__ )
__a = predictions.reshape(len(lowerCAmelCase__ ) , 1 )
return predictions
def lowercase ( ) -> None:
__a = fetch_california_housing()
__a , __a = data_handling(lowerCAmelCase__ )
__a , __a , __a , __a = train_test_split(
lowerCAmelCase__ , lowerCAmelCase__ , test_size=0.25 , random_state=1 )
__a = xgboost(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
print(f'''Mean Square Error : {mean_squared_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 11 | 0 |
"""simple docstring"""
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
lowercase_ = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Any=None ) -> Any:
require_version(deps[pkg] , lowerCAmelCase__ )
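# --- usage sketch (illustrative only) ------------------------------------------
# The helper above (``dep_version_check`` in the original source, mangled to
# ``lowercase`` here) looks up a pin from the deps table and defers to
# require_version, e.g.:
#
#     dep_version_check("numpy")                          # checks e.g. "numpy>=1.17"
#     require_version("tqdm>=4.27", "pip install tqdm")   # the underlying primitive
#
# (the concrete version pins live in dependency_versions_table.py and vary by
# release, so the "numpy>=1.17" string is only an example)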
| 350 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 0 |
"""simple docstring"""
import itertools
import os
import re
__lowerCAmelCase = re.compile(r"([A-Z]+)([A-Z][a-z])")
__lowerCAmelCase = re.compile(r"([a-z\d])([A-Z])")
__lowerCAmelCase = re.compile(r"(?<!_)_(?!_)")
__lowerCAmelCase = re.compile(r"(_{2,})")
__lowerCAmelCase = r"^\w+(\.\w+)*$"
__lowerCAmelCase = r"<>:/\|?*"
def lowercase ( lowerCAmelCase__ : List[Any] ) -> Optional[int]:
__a = _uppercase_uppercase_re.sub(r'''\1_\2''' , lowerCAmelCase__ )
__a = _lowercase_uppercase_re.sub(r'''\1_\2''' , lowerCAmelCase__ )
return name.lower()
def lowercase ( lowerCAmelCase__ : Tuple ) -> Union[str, Any]:
__a = _single_underscore_re.split(lowerCAmelCase__ )
__a = [_multiple_underscores_re.split(n ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(lowerCAmelCase__ ) if n != '''''' )
def lowercase ( lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
if os.path.basename(lowerCAmelCase__ ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
return camelcase_to_snakecase(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any] ) -> Tuple:
if os.path.basename(lowerCAmelCase__ ) != name:
raise ValueError(f'''Should be a dataset name, not a path: {name}''' )
if not re.match(_split_re , lowerCAmelCase__ ):
raise ValueError(f'''Split name should match \'{_split_re}\' but got \'{split}\'.''' )
return f'''{filename_prefix_for_name(lowerCAmelCase__ )}-{split}'''
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]=None ) -> Dict:
__a = filename_prefix_for_split(lowerCAmelCase__ , lowerCAmelCase__ )
if filetype_suffix:
prefix += f'''.{filetype_suffix}'''
__a = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
return f'''{filepath}*'''
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[Any]=None , lowerCAmelCase__ : Any=None ) -> Optional[int]:
__a = filename_prefix_for_split(lowerCAmelCase__ , lowerCAmelCase__ )
__a = os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )
if shard_lengths:
__a = len(lowerCAmelCase__ )
__a = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(lowerCAmelCase__ )]
if filetype_suffix:
__a = [filename + f'''.{filetype_suffix}''' for filename in filenames]
return filenames
else:
__a = prefix
if filetype_suffix:
filename += f'''.{filetype_suffix}'''
return [filename]
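# --- example (illustrative) -----------------------------------------------------
# End-to-end behaviour of the naming helpers above (their original
# ``datasets.naming`` names are mangled to ``lowercase`` here):
#
#     camelcase_to_snakecase("SquadV2")            -> "squad_v2"
#     snakecase_to_camelcase("squad_v2")           -> "SquadV2"
#     filename_prefix_for_split("squad", "train")  -> "squad-train"
#     filenames_for_dataset_split(
#         "/data", "squad", "train", "arrow", shard_lengths=[10, 10]
#     )  -> ["/data/squad-train-00000-of-00002.arrow",
#            "/data/squad-train-00001-of-00002.arrow"]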
| 351 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 'funnel'
__UpperCAmelCase : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self , _a=30_522 , _a=[4, 4, 4] , _a=None , _a=2 , _a=768 , _a=12 , _a=64 , _a=3_072 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1E-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = block_sizes
__a = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__a = num_decoder_layers
__a = d_model
__a = n_head
__a = d_head
__a = d_inner
__a = hidden_act
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = initializer_range
__a = initializer_std
__a = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__a = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__a = attention_type
__a = separate_cls
__a = truncate_seq
__a = pool_q_only
super().__init__(**_a )
@property
def __UpperCAmelCase ( self ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def __UpperCAmelCase ( self ):
return len(self.block_sizes )
@num_blocks.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
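# --- usage sketch (illustrative only) ------------------------------------------
# Assuming the class above is transformers' FunnelConfig, the derived
# properties follow directly from block_sizes:
if __name__ == "__main__":
    from transformers import FunnelConfig

    config = FunnelConfig(block_sizes=[4, 4, 4], block_repeats=[1, 2, 2])
    print(config.num_blocks)  # 3, i.e. len(block_sizes)
    print(config.num_hidden_layers)  # 12, i.e. sum(block_sizes)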
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
lowercase_ = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
lowercase_ = typing.Union[np.floataa, int, float] # noqa: UP007
def lowercase ( lowerCAmelCase__ : Vector , lowerCAmelCase__ : Vector ) -> VectorOut:
return np.sqrt(np.sum((np.asarray(lowerCAmelCase__ ) - np.asarray(lowerCAmelCase__ )) ** 2 ) )
def lowercase ( lowerCAmelCase__ : Vector , lowerCAmelCase__ : Vector ) -> VectorOut:
return sum((va - va) ** 2 for va, va in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) ** (1 / 2)
if __name__ == "__main__":
def lowercase ( ) -> None:
from timeit import timeit
print('''Without Numpy''' )
print(
timeit(
'''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=10000 , globals=globals() , ) )
print('''With Numpy''' )
print(
timeit(
'''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=10000 , globals=globals() , ) )
benchmark()
| 352 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=64 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=32 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , _a=0.25 , _a=0.0 , _a=0.0 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self ):
return MobileViTVaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout , attn_dropout=self.attn_dropout , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase : Union[str, Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __UpperCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
def check_hidden_states_output(_a , _a , _a ):
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(_a ) , _a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img( ) -> str:
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
# verify the logits
__a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
__a = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _a )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _a )
__a = image_processor.post_process_semantic_segmentation(outputs=_a )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _a )
| 11 | 0 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()
    txtaimg = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")
    imgaimg = UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
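    # Example invocation (the script and output path names are illustrative assumptions;
    # --txt2img_unclip falls back to the kakaobrain/karlo-v1-alpha checkpoint when omitted):
    #
    #     python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variation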
| 353 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
__a = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(_a )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
__a = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
__a = self.segmentation_model(**_a )
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
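# A hedged usage sketch (checkpoint ids, prompts, and `init_image` are illustrative
# assumptions; it presumes this class is exposed through diffusers' community
# `text_inpainting` entry point rather than the obfuscated name above):
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-inpainting",
#         custom_pipeline="text_inpainting",
#         segmentation_model=CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined"),
#         segmentation_processor=CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined"),
#     ).to("cuda")
#     image = pipe(prompt="a bottle of wine", text="a glass bottle", image=init_image).images[0]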
| 11 | 0 |
"""simple docstring"""
from collections.abc import Sequence
from queue import Queue
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a , _a=None , _a=None ):
__a = start
__a = end
__a = val
__a = (start + end) // 2
__a = left
__a = right
def __repr__( self ):
return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})'''
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a ):
__a = collection
__a = function
if self.collection:
__a = self._build_tree(0 , len(_a ) - 1 )
def __UpperCAmelCase ( self , _a , _a ):
self._update_tree(self.root , _a , _a )
def __UpperCAmelCase ( self , _a , _a ):
return self._query_range(self.root , _a , _a )
def __UpperCAmelCase ( self , _a , _a ):
if start == end:
return SegmentTreeNode(_a , _a , self.collection[start] )
__a = (start + end) // 2
__a = self._build_tree(_a , _a )
__a = self._build_tree(mid + 1 , _a )
return SegmentTreeNode(_a , _a , self.fn(left.val , right.val ) , _a , _a )
def __UpperCAmelCase ( self , _a , _a , _a ):
if node.start == i and node.end == i:
__a = val
return
if i <= node.mid:
self._update_tree(node.left , _a , _a )
else:
self._update_tree(node.right , _a , _a )
__a = self.fn(node.left.val , node.right.val )
def __UpperCAmelCase ( self , _a , _a , _a ):
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , _a , _a )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , _a , node.mid ) , self._query_range(node.right , node.mid + 1 , _a ) , )
else:
# range in right child tree
return self._query_range(node.right , _a , _a )
def __UpperCAmelCase ( self ):
if self.root is not None:
__a = Queue()
queue.put(self.root )
while not queue.empty():
__a = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 5_0)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 354 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
logger = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_a ):
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , _a , **_a ):
return super().__call__(_a , **_a )
def __UpperCAmelCase ( self , **_a ):
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__a = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self , _a , _a=None , _a="This is a sound of {}." ):
if isinstance(_a , _a ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(_a ).content
else:
with open(_a , '''rb''' ) as f:
__a = f.read()
if isinstance(_a , _a ):
__a = ffmpeg_read(_a , self.feature_extractor.sampling_rate )
if not isinstance(_a , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
__a = candidate_labels
        __a = [hypothesis_template.format(x ) for x in candidate_labels]
__a = self.tokenizer(_a , return_tensors=self.framework , padding=_a )
__a = [text_inputs]
return inputs
def __UpperCAmelCase ( self , _a ):
__a = model_inputs.pop('''candidate_labels''' )
__a = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _a ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**_a , **_a )
__a = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def __UpperCAmelCase ( self , _a ):
__a = model_outputs.pop('''candidate_labels''' )
__a = model_outputs['''logits'''][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
__a = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(_a , _a ) , key=lambda x : -x[0] )
]
return result
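# A hedged usage sketch via the high-level pipeline API (the CLAP checkpoint, audio file,
# and labels are illustrative assumptions):
#
#     from transformers import pipeline
#     classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#     classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])
#     # -> [{"score": ..., "label": "Sound of a dog"}, ...]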
| 11 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=32 , _a=2 , _a=3 , _a=16 , _a=[1, 2, 1] , _a=[2, 2, 4] , _a=2 , _a=2.0 , _a=True , _a=0.0 , _a=0.0 , _a=0.1 , _a="gelu" , _a=False , _a=True , _a=0.02 , _a=1E-5 , _a=True , _a=None , _a=True , _a=10 , _a=8 , _a=["stage1", "stage2", "stage3"] , _a=[1, 2, 3] , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = embed_dim
__a = depths
__a = num_heads
__a = window_size
__a = mlp_ratio
__a = qkv_bias
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = drop_path_rate
__a = hidden_act
__a = use_absolute_embeddings
__a = patch_norm
__a = layer_norm_eps
__a = initializer_range
__a = is_training
__a = scope
__a = use_labels
__a = type_sequence_label_size
__a = encoder_stride
__a = out_features
__a = out_indices
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self ):
return MaskFormerSwinConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = MaskFormerSwinModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
__a = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__a = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __UpperCAmelCase ( self , _a , _a , _a ):
__a = MaskFormerSwinBackbone(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_a ):
__a = ['''stem''']
__a = MaskFormerSwinBackbone(config=_a )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : Optional[Any] = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Dict = False
__UpperCAmelCase : List[Any] = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
__a = MaskFormerSwinModelTester(self )
__a = ConfigTester(self , config_class=_a , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
''' `nn.DataParallel`'''
) )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self ):
return
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_a )
@unittest.skip('''Swin does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Swin does not support feedforward chunking''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
@unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.hidden_states
__a = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_a ) , _a )
# Swin has a different seq_length
__a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__a = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__a = True
self.check_hidden_states_output(_a , _a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
self.check_hidden_states_output(_a , _a , _a , _a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__a = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__a = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__a = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__a = True
self.check_hidden_states_output(_a , _a , _a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
self.check_hidden_states_output(_a , _a , _a , (padded_height, padded_width) )
@unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(_a ):
            # Zero out NaN entries (NaN is the only value not equal to itself).
            _a[_a != _a] = 0
            return _a
def check_equivalence(_a , _a , _a , _a={} ):
with torch.no_grad():
__a = model(**_a , return_dict=_a , **_a )
__a = model(**_a , return_dict=_a , **_a ).to_tuple()
def recursive_check(_a , _a ):
if isinstance(_a , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_a , _a ):
recursive_check(_a , _a )
elif isinstance(_a , _a ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_a , _a )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_a ) , set_nan_tensor_to_zero(_a ) , atol=1E-5 ) , msg=(
'''Tuple and dict output are not equal. Difference:'''
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(_a ).any()} and `inf`: {torch.isinf(_a )}. Dict has'''
f''' `nan`: {torch.isnan(_a ).any()} and `inf`: {torch.isinf(_a )}.'''
) , )
recursive_check(_a , _a )
for model_class in self.all_model_classes:
__a = model_class(_a )
model.to(_a )
model.eval()
__a = self._prepare_for_class(_a , _a )
__a = self._prepare_for_class(_a , _a )
check_equivalence(_a , _a , _a )
__a = self._prepare_for_class(_a , _a , return_labels=_a )
__a = self._prepare_for_class(_a , _a , return_labels=_a )
check_equivalence(_a , _a , _a )
__a = self._prepare_for_class(_a , _a )
__a = self._prepare_for_class(_a , _a )
check_equivalence(_a , _a , _a , {'''output_hidden_states''': True} )
__a = self._prepare_for_class(_a , _a , return_labels=_a )
__a = self._prepare_for_class(_a , _a , return_labels=_a )
check_equivalence(_a , _a , _a , {'''output_hidden_states''': True} )
@require_torch
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__UpperCAmelCase : Union[str, Any] = MaskFormerSwinConfig
def __UpperCAmelCase ( self ):
__a = MaskFormerSwinModelTester(self )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = inputs_dict['''pixel_values'''].shape[0]
for backbone_class in self.all_model_classes:
__a = backbone_class(_a )
backbone.to(_a )
backbone.eval()
__a = backbone(**_a )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _a )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__a = backbone(**_a , output_hidden_states=_a )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__a , __a , __a = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__a = backbone(**_a , output_attentions=_a )
self.assertIsNotNone(outputs.attentions )
| 355 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__a = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sgugger/tiny-distilbert-classification'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
# set architectures equal to `None`
__a = None
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_a , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_a , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_a , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_a , '''env.csv''' ) , multi_process=_a , )
__a = PyTorchBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''env.csv''' ) ).exists() )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_a ):
self.assertTrue(hasattr(_a , '''sequential''' ) )
self.assertTrue(hasattr(_a , '''cumulative''' ) )
self.assertTrue(hasattr(_a , '''current''' ) )
self.assertTrue(hasattr(_a , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , '''log.txt''' ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_a , '''log.txt''' ) ).exists() )
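# A minimal stand-alone sketch of the benchmark API exercised above (the model id and
# sizes mirror the tests; multi_process is disabled for notebook-style runs):
#
#     from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
#     args = PyTorchBenchmarkArguments(
#         models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#         sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#     )
#     results = PyTorchBenchmark(args).run()
#     print(results.time_inference_result)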
| 11 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
logger = logging.getLogger(__name__)
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__UpperCAmelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : Optional[str] = field(default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The input training data file (a text file).'} )
__UpperCAmelCase : Optional[str] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__UpperCAmelCase : Optional[int] = field(
default=__SCREAMING_SNAKE_CASE , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
__UpperCAmelCase : Optional[int] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__UpperCAmelCase : bool = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
} , )
__UpperCAmelCase : Optional[int] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__UpperCAmelCase : Optional[int] = field(
default=__SCREAMING_SNAKE_CASE , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __UpperCAmelCase ( self ):
if self.train_file is not None:
__a = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
__a = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : PreTrainedTokenizerBase
__UpperCAmelCase : Union[bool, str, PaddingStrategy] = True
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Optional[int] = None
def __call__( self , _a ):
__a = '''label''' if '''label''' in features[0].keys() else '''labels'''
__a = [feature.pop(_a ) for feature in features]
__a = len(_a )
__a = len(features[0]['''input_ids'''] )
__a = [
[{k: v[i] for k, v in feature.items()} for i in range(_a )] for feature in features
]
__a = list(chain(*_a ) )
__a = self.tokenizer.pad(
_a , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
__a = {k: v.view(_a , _a , -1 ) for k, v in batch.items()}
# Add back labels
__a = torch.tensor(_a , dtype=torch.intaa )
return batch
def main( ) -> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__a , __a , __a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__a , __a , __a = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , lowerCAmelCase__ , lowerCAmelCase__ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__a = training_args.get_process_log_level()
logger.setLevel(lowerCAmelCase__ )
datasets.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.set_verbosity(lowerCAmelCase__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
__a = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__a = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
__a = {}
if data_args.train_file is not None:
__a = data_args.train_file
if data_args.validation_file is not None:
__a = data_args.validation_file
__a = data_args.train_file.split('''.''' )[-1]
__a = load_dataset(
lowerCAmelCase__ , data_files=lowerCAmelCase__ , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
__a = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__a = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=lowerCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
__a = [f'''ending{i}''' for i in range(4 )]
__a = '''sent1'''
__a = '''sent2'''
if data_args.max_seq_length is None:
__a = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''' )
__a = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
__a = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(lowerCAmelCase__ : List[Any] ):
__a = [[context] * 4 for context in examples[context_name]]
__a = examples[question_header_name]
__a = [
[f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(lowerCAmelCase__ )
]
# Flatten out
__a = list(chain(*lowerCAmelCase__ ) )
__a = list(chain(*lowerCAmelCase__ ) )
# Tokenize
__a = tokenizer(
lowerCAmelCase__ , lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(lowerCAmelCase__ ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''' )
__a = raw_datasets['''train''']
if data_args.max_train_samples is not None:
__a = min(len(lowerCAmelCase__ ) , data_args.max_train_samples )
__a = train_dataset.select(range(lowerCAmelCase__ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
__a = train_dataset.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''' )
__a = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
__a = min(len(lowerCAmelCase__ ) , data_args.max_eval_samples )
__a = eval_dataset.select(range(lowerCAmelCase__ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
__a = eval_dataset.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
__a = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=lowerCAmelCase__ , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(lowerCAmelCase__ : int ):
__a , __a = eval_predictions
__a = np.argmax(lowerCAmelCase__ , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
__a = Trainer(
model=lowerCAmelCase__ , args=lowerCAmelCase__ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=lowerCAmelCase__ , data_collator=lowerCAmelCase__ , compute_metrics=lowerCAmelCase__ , )
# Training
if training_args.do_train:
__a = None
if training_args.resume_from_checkpoint is not None:
__a = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__a = last_checkpoint
__a = trainer.train(resume_from_checkpoint=lowerCAmelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
__a = train_result.metrics
__a = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(lowerCAmelCase__ )
)
__a = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
trainer.log_metrics('''train''' , lowerCAmelCase__ )
trainer.save_metrics('''train''' , lowerCAmelCase__ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
__a = trainer.evaluate()
__a = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(lowerCAmelCase__ )
__a = min(lowerCAmelCase__ , len(lowerCAmelCase__ ) )
trainer.log_metrics('''eval''' , lowerCAmelCase__ )
trainer.save_metrics('''eval''' , lowerCAmelCase__ )
__a = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase__ )
else:
trainer.create_model_card(**lowerCAmelCase__ )
def _mp_fn( lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
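# Example fine-tuning invocation (the model name and hyper-parameters are illustrative):
#
#     python run_swag.py \
#         --model_name_or_path bert-base-uncased \
#         --do_train --do_eval \
#         --per_device_train_batch_size 16 \
#         --learning_rate 5e-5 \
#         --num_train_epochs 3 \
#         --output_dir /tmp/swag_output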
| 356 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
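# With the lazy module installed in sys.modules, a top-level import only triggers the
# heavy submodule import on first attribute access, e.g. (illustrative, using the real
# FocalNet symbol names declared above):
#
#     from transformers import FocalNetConfig, FocalNetForImageClassification
#     model = FocalNetForImageClassification(FocalNetConfig())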
| 11 | 0 |
"""simple docstring"""
def lowercase ( num : int ) -> bool:
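    """Check whether a non-negative integer reads the same in reverse.

    >>> lowercase(121)
    True
    >>> lowercase(123)
    False
    >>> lowercase(-101)
    False
    """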
if num < 0:
return False
    num_copy = num
    rev_num = 0
    while num > 0:
        # Shift the accumulated reversal left and append the current last digit.
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 357 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a , _a ):
self.assertEqual(len(_a ) , len(_a ) )
for a, b in zip(_a , _a ):
self.assertAlmostEqual(_a , _a , delta=_a )
def __UpperCAmelCase ( self ):
__a = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_a ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def __UpperCAmelCase ( self ):
__a = None
ops.enable_eager_execution_internal()
__a = tf.config.list_physical_devices('''CPU''' )
if len(_a ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a = tf.config.list_logical_devices(device_type='''CPU''' )
__a = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a = GradientAccumulator()
__a = tf.Variable([4.0, 3.0] )
__a , __a = create_optimizer(5E-5 , 10 , 5 )
__a = tf.Variable([0.0, 0.0] , trainable=_a )
def accumulate_on_replica(_a ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_a , _a ):
with strategy.scope():
__a = strategy.experimental_local_results(_a )
local_variables[0].assign(_a )
local_variables[1].assign(_a )
strategy.run(_a , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_a )
def _check_local_values(_a , _a ):
__a = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _a , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _a , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
lowercase_ = TypeVar("T")
class __lowerCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self , _a , _a ):
__a = None
__a = len(_a )
__a = [any_type for _ in range(self.N )] + arr
__a = fnc
self.build()
def __UpperCAmelCase ( self ):
for p in range(self.N - 1 , 0 , -1 ):
__a = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __UpperCAmelCase ( self , _a , _a ):
p += self.N
__a = v
while p > 1:
__a = p // 2
__a = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def __UpperCAmelCase ( self , _a , _a ): # noqa: E741
__a , __a = l + self.N, r + self.N
__a = None
while l <= r:
if l % 2 == 1:
__a = self.st[l] if res is None else self.fn(_a , self.st[l] )
if r % 2 == 0:
__a = self.st[r] if res is None else self.fn(_a , self.st[r] )
__a , __a = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
lowercase_ = [1, 1_0, -2, 9, -3, 8, 4, -7, 5, 6, 1_1, -1_2]
lowercase_ = {
0: 7,
1: 2,
2: 6,
3: -1_4,
4: 5,
5: 4,
6: 7,
7: -1_0,
8: 9,
9: 1_0,
1_0: 1_2,
1_1: 1,
}
lowercase_ = SegmentTree(test_array, min)
lowercase_ = SegmentTree(test_array, max)
lowercase_ = SegmentTree(test_array, lambda a, b: a + b)
def lowercase ( ) -> None:
for i in range(len(lowerCAmelCase__ ) ):
for j in range(lowerCAmelCase__ , len(lowerCAmelCase__ ) ):
__a = reduce(lowerCAmelCase__ , test_array[i : j + 1] )
__a = reduce(lowerCAmelCase__ , test_array[i : j + 1] )
__a = reduce(lambda lowerCAmelCase__ , lowerCAmelCase__ : a + b , test_array[i : j + 1] )
assert min_range == min_segment_tree.query(lowerCAmelCase__ , lowerCAmelCase__ )
assert max_range == max_segment_tree.query(lowerCAmelCase__ , lowerCAmelCase__ )
assert sum_range == sum_segment_tree.query(lowerCAmelCase__ , lowerCAmelCase__ )
test_all_segments()
for index, value in test_updates.items():
lowercase_ = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
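    # Illustrative direct query (assumed inclusive [l, r] semantics, mirroring the
    # reduce() checks above): after the updates, index 3 holds -14.
    print(min_segment_tree.query(1, 6))  # -14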
| 358 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
lowercase_ = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
lowercase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = ['input_ids', 'attention_mask']
__UpperCAmelCase : Optional[Any] = MBartTokenizer
__UpperCAmelCase : List[int] = []
__UpperCAmelCase : List[int] = []
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , **_a , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , **_a , )
__a = vocab_file
__a = False if not self.vocab_file else True
__a = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__a = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__a = src_lang if src_lang is not None else '''en_XX'''
__a = self.convert_tokens_to_ids(self._src_lang )
__a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _a ):
__a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a , _a , _a , **_a ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a = src_lang
__a = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
__a = self.convert_tokens_to_ids(_a )
__a = tgt_lang_id
return inputs
def __UpperCAmelCase ( self , _a , _a = "en_XX" , _a = None , _a = "ro_RO" , **_a , ):
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def __UpperCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 11 | 0 |
"""simple docstring"""
from collections import namedtuple
lowercase_ = namedtuple("from_to", "from_ to")
lowercase_ = {
"cubicmeter": from_to(1, 1),
"litre": from_to(0.001, 1_0_0_0),
"kilolitre": from_to(1, 1),
"gallon": from_to(0.00454, 264.172),
"cubicyard": from_to(0.76455, 1.30795),
"cubicfoot": from_to(0.028, 35.3147),
"cup": from_to(0.000236588, 4226.75),
}
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : str , lowerCAmelCase__ : str ) -> float:
if from_type not in METRIC_CONVERSION:
raise ValueError(
            f'''Invalid \'from_type\' value: {from_type!r}. Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f'''Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'''
            + ''', '''.join(METRIC_CONVERSION ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
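    # Illustrative check (assumed semantics of the table above): a value is first
    # normalised to cubic metres via `from_`, then scaled by the target's `to`.
    assert 4 * METRIC_CONVERSION["cubicmeter"].from_ * METRIC_CONVERSION["litre"].to == 4000.0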
| 359 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase_ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = load_tool('''text-question-answering''' )
self.tool.setup()
__a = load_tool('''text-question-answering''' , remote=_a )
def __UpperCAmelCase ( self ):
__a = self.tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
| 11 | 0 |
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
lowercase_ = object()
# For specifying empty leaf dict `{}`
lowercase_ = object()
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] ) -> Optional[int]:
__a = tuple((re.compile(x + '''$''' ) for x in qs) )
for i in range(len(lowerCAmelCase__ ) - len(lowerCAmelCase__ ) + 1 ):
__a = [x.match(lowerCAmelCase__ ) for x, y in zip(lowerCAmelCase__ , ks[i:] )]
if matches and all(lowerCAmelCase__ ):
return True
return False
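# A minimal runnable sketch of the window matcher above (a de-obfuscated reading
# of the original): a tuple of regex fragments matches a key tuple if it aligns
# with some contiguous window of that key.
def _match_sketch(qs, ks):
    compiled = tuple(re.compile(x + "$") for x in qs)
    for i in range(len(ks) - len(qs) + 1):
        if all(r.match(k) for r, k in zip(compiled, ks[i:])):
            return True
    return False
assert _match_sketch(("mlp", "c_fc"), ("transformer", "mlp", "c_fc"))
assert not _match_sketch(("attention",), ("mlp", "c_proj"))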
def lowercase ( lowerCAmelCase__ : List[Any] ) -> Any:
def replace(lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any ):
for rule, replacement in rules:
if _match(lowerCAmelCase__ , lowerCAmelCase__ ):
return replacement
return val
return replace
def lowercase ( ) -> List[Any]:
return [
# embeddings
(("transformer", "wpe", "embedding"), P('''mp''' , lowerCAmelCase__ )),
(("transformer", "wte", "embedding"), P('''mp''' , lowerCAmelCase__ )),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCAmelCase__ , '''mp''' )),
(("attention", "out_proj", "kernel"), P('''mp''' , lowerCAmelCase__ )),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(lowerCAmelCase__ , '''mp''' )),
(("mlp", "c_fc", "bias"), P('''mp''' )),
(("mlp", "c_proj", "kernel"), P('''mp''' , lowerCAmelCase__ )),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowercase ( lowerCAmelCase__ : Dict ) -> Optional[int]:
__a = _get_partition_rules()
__a = _replacement_rules(lowerCAmelCase__ )
__a = {k: _unmatched for k in flatten_dict(lowerCAmelCase__ )}
__a = {k: replace(lowerCAmelCase__ , lowerCAmelCase__ ) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(lowerCAmelCase__ ) )
| 360 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
"junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
"junnyu/roformer_chinese_char_small": (
"https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
),
"junnyu/roformer_chinese_char_base": (
"https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
),
"junnyu/roformer_small_discriminator": (
"https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
),
"junnyu/roformer_small_generator": (
"https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'roformer'
def __init__( self , _a=50_000 , _a=None , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=1_536 , _a=2 , _a=0.02 , _a=1E-12 , _a=0 , _a=False , _a=True , **_a , ):
super().__init__(pad_token_id=_a , **_a )
__a = vocab_size
__a = hidden_size if embedding_size is None else embedding_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = initializer_range
__a = layer_norm_eps
__a = rotary_value
__a = use_cache
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def __UpperCAmelCase ( self ):
if self.task == "multiple-choice":
__a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
__a = {0: '''batch''', 1: '''sequence'''}
__a = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
| 361 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[list[str]] , lowerCAmelCase__ : int , ) -> None:
__a = len(lowerCAmelCase__ )
    # If row is equal to the size of the board, it means there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
    # We iterate over each column in the row to find all valid queen placements
for col in range(lowerCAmelCase__ ):
        # We apply what we learned previously. First we check that the current column
        # does not already appear in the board (possible_board), because if it does,
        # two queens would collide vertically. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or 135: row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If either does, there is a collision, so we continue to the next value in the
        # for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # If there is no collision, we call the DFS function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , lowerCAmelCase__ , lowerCAmelCase__ , )
def lowercase ( lowerCAmelCase__ : int ) -> None:
__a = []
depth_first_search([] , [] , [] , lowerCAmelCase__ , lowerCAmelCase__ )
# Print all the boards
for board in boards:
for column in board:
print(lowerCAmelCase__ )
print('''''' )
print(len(lowerCAmelCase__ ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 11 | 0 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : Dict=None ) -> Tuple:
__a = None
if token is not None:
__a = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}
__a = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__a = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ ).json()
__a = {}
try:
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__a = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(lowerCAmelCase__ ):
__a = requests.get(url + f'''&page={i + 2}''' , headers=lowerCAmelCase__ ).json()
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return job_links
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowercase ( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int]=None ) -> Any:
__a = None
if token is not None:
__a = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}
__a = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
__a = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ ).json()
__a = {}
try:
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
__a = math.ceil((result['''total_count'''] - 100) / 100 )
for i in range(lowerCAmelCase__ ):
__a = requests.get(url + f'''&page={i + 2}''' , headers=lowerCAmelCase__ ).json()
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
return artifacts
except Exception:
print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
__a = None
if token is not None:
__a = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f'''Bearer {token}'''}
__a = requests.get(lowerCAmelCase__ , headers=lowerCAmelCase__ , allow_redirects=lowerCAmelCase__ )
__a = result.headers['''Location''']
__a = requests.get(lowerCAmelCase__ , allow_redirects=lowerCAmelCase__ )
__a = os.path.join(lowerCAmelCase__ , f'''{artifact_name}.zip''' )
with open(lowerCAmelCase__ , '''wb''' ) as fp:
fp.write(response.content )
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict=None ) -> Optional[Any]:
__a = []
__a = []
__a = None
with zipfile.ZipFile(lowerCAmelCase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCAmelCase__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(lowerCAmelCase__ ) as f:
for line in f:
__a = line.decode('''UTF-8''' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
__a = line[: line.index(''': ''' )]
__a = line[line.index(''': ''' ) + len(''': ''' ) :]
errors.append([error_line, error] )
except Exception:
                            # skip unrelated lines
pass
elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
# `test` is the test method that failed
__a = line[len('''FAILED ''' ) :]
failed_tests.append(lowerCAmelCase__ )
elif filename == "job_name.txt":
__a = line
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
f'''`errors` and `failed_tests` should have the same number of elements. Got {len(lowerCAmelCase__ )} for `errors` '''
f'''and {len(lowerCAmelCase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
''' problem.''' )
__a = None
if job_name and job_links:
__a = job_links.get(lowerCAmelCase__ , lowerCAmelCase__ )
# A list with elements of the form (line of error, error, failed test)
__a = [x + [y] + [job_link] for x, y in zip(lowerCAmelCase__ , lowerCAmelCase__ )]
return result
def lowercase ( lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int]=None ) -> Any:
__a = []
__a = [os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) for p in os.listdir(lowerCAmelCase__ ) if p.endswith('''.zip''' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(lowerCAmelCase__ , job_links=lowerCAmelCase__ ) )
return errors
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any]=None ) -> List[str]:
__a = Counter()
counter.update([x[1] for x in logs] )
__a = counter.most_common()
__a = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
__a = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}
__a = dict(sorted(r.items() , key=lambda lowerCAmelCase__ : item[1]["count"] , reverse=lowerCAmelCase__ ) )
return r
def lowercase ( lowerCAmelCase__ : Optional[Any] ) -> int:
__a = test.split('''::''' )[0]
if test.startswith('''tests/models/''' ):
__a = test.split('''/''' )[2]
else:
__a = None
return test
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any]=None ) -> str:
__a = [(x[0], x[1], get_model(x[2] )) for x in logs]
__a = [x for x in logs if x[2] is not None]
__a = {x[2] for x in logs}
__a = {}
for test in tests:
__a = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
__a = counter.most_common()
__a = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
__a = sum(error_counts.values() )
if n_errors > 0:
__a = {'''count''': n_errors, '''errors''': error_counts}
__a = dict(sorted(r.items() , key=lambda lowerCAmelCase__ : item[1]["count"] , reverse=lowerCAmelCase__ ) )
return r
def lowercase ( lowerCAmelCase__ : Union[str, Any] ) -> List[str]:
__a = '''| no. | error | status |'''
__a = '''|-:|:-|:-|'''
__a = [header, sep]
for error in reduced_by_error:
__a = reduced_by_error[error]['''count''']
__a = f'''| {count} | {error[:100]} | |'''
lines.append(lowerCAmelCase__ )
return "\n".join(lowerCAmelCase__ )
def lowercase ( lowerCAmelCase__ : int ) -> Optional[Any]:
__a = '''| model | no. of errors | major error | count |'''
__a = '''|-:|-:|-:|-:|'''
__a = [header, sep]
for model in reduced_by_model:
__a = reduced_by_model[model]['''count''']
__a , __a = list(reduced_by_model[model]['''errors'''].items() )[0]
__a = f'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(lowerCAmelCase__ )
return "\n".join(lowerCAmelCase__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
lowercase_ = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
lowercase_ = get_job_links(args.workflow_run_id, token=args.token)
lowercase_ = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
lowercase_ = k.find(" / ")
lowercase_ = k[index + len(" / ") :]
lowercase_ = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
lowercase_ = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
lowercase_ = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
lowercase_ = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
lowercase_ = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
lowercase_ = reduce_by_error(errors)
lowercase_ = reduce_by_model(errors)
lowercase_ = make_github_table(reduced_by_error)
lowercase_ = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 362 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 11 | 0 |
"""simple docstring"""
from math import factorial
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : float ) -> float:
if successes > trials:
        raise ValueError('''successes must be lower than or equal to trials''' )
if trials < 0 or successes < 0:
raise ValueError('''the function is defined for non-negative integers''' )
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError('''the function is defined for non-negative integers''' )
if not 0 < prob < 1:
        raise ValueError('''prob has to be in the range (0, 1)''' )
__a = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
__a = float(factorial(lowerCAmelCase__ ) )
coefficient /= factorial(lowerCAmelCase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.75))
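    # Worked instance: C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375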
| 363 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'vit_mae'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=224 , _a=16 , _a=3 , _a=True , _a=16 , _a=512 , _a=8 , _a=2_048 , _a=0.75 , _a=False , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = decoder_num_attention_heads
__a = decoder_hidden_size
__a = decoder_num_hidden_layers
__a = decoder_intermediate_size
__a = mask_ratio
__a = norm_pix_loss
| 11 | 0 |
"""simple docstring"""
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowercase ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[str] ) -> List[Any]:
# Load checkpoint
__a = torch.load(lowerCAmelCase__ , map_location='''cpu''' )
__a = chkpt['''model''']
# We have the base model one level deeper than the original XLM repository
__a = {}
for k, v in state_dict.items():
if "pred_layer" in k:
__a = v
else:
__a = v
__a = chkpt['''params''']
__a = {n: v for n, v in config.items() if not isinstance(lowerCAmelCase__ , (torch.FloatTensor, numpy.ndarray) )}
__a = chkpt['''dico_word2id''']
__a = {s + '''</w>''' if s.find('''@@''' ) == -1 and i > 13 else s.replace('''@@''' , '''''' ): i for s, i in vocab.items()}
# Save pytorch-model
__a = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
__a = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
__a = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''vocab_file''']
print(f'''Save PyTorch model to {pytorch_weights_dump_path}''' )
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , indent=2 ) + '''\n''' )
    print(f'''Save vocab file to {pytorch_vocab_dump_path}''' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(lowerCAmelCase__ , indent=2 ) + '''\n''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 364 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['image_processor', 'tokenizer']
__UpperCAmelCase : str = 'LayoutLMv3ImageProcessor'
__UpperCAmelCase : Optional[int] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , _a=None , _a=None , **_a ):
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
__a = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_a , _a ):
__a = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a = features['''words''']
__a = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
__a = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
__a = images
return encoded_inputs
def __UpperCAmelCase ( self , _a , _a ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(_a )} and {len(_a )}''' )
return images_with_overflow
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def __UpperCAmelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
| 11 | 0 |
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = ComputeEnvironment.AMAZON_SAGEMAKER
__UpperCAmelCase : Tuple = True
__UpperCAmelCase : List[Any] = 'ml.p3.2xlarge'
__UpperCAmelCase : List[str] = 'accelerate_sagemaker_execution_role'
__UpperCAmelCase : Optional[Any] = 'hf-sm'
__UpperCAmelCase : Optional[int] = 'us-east-1'
__UpperCAmelCase : List[Any] = 1
__UpperCAmelCase : List[str] = 'accelerate-sagemaker-1'
__UpperCAmelCase : int = '1.6'
__UpperCAmelCase : List[Any] = '4.4'
__UpperCAmelCase : Any = 'train.py'
__UpperCAmelCase : Tuple = [
'--model_name_or_path',
'bert',
'--do_train',
'False',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
__UpperCAmelCase : Optional[Any] = [
'--model_name_or_path',
'bert',
'--do_train',
'--do_test',
'False',
'--do_predict',
'--epochs',
'3',
'--learning_rate',
'5e-5',
'--max_steps',
'50.5',
]
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
        # `_convert_nargs_to_dict` should parse the raw script args into typed values.
__a = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['''model_name_or_path'''] , _a )
assert isinstance(converted_args['''do_train'''] , _a )
assert isinstance(converted_args['''epochs'''] , _a )
assert isinstance(converted_args['''learning_rate'''] , _a )
assert isinstance(converted_args['''max_steps'''] , _a )
with pytest.raises(_a ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 365 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]=0.9_99 , lowerCAmelCase__ : List[str]="cosine" , ) -> Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCAmelCase__ : int ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCAmelCase__ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
__a = []
for i in range(lowerCAmelCase__ ):
__a = i / num_diffusion_timesteps
__a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCAmelCase__ ) / alpha_bar_fn(lowerCAmelCase__ ) , lowerCAmelCase__ ) )
return torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
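# A minimal runnable sketch of the cosine schedule above (a de-obfuscated reading
# of the original; max_beta = 0.999 matches the function's default):
def _betas_for_alpha_bar_sketch(num_diffusion_timesteps: int, max_beta: float = 0.999) -> list:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    return [
        min(1 - alpha_bar((i + 1) / num_diffusion_timesteps) / alpha_bar(i / num_diffusion_timesteps), max_beta)
        for i in range(num_diffusion_timesteps)
    ]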
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : str = 2
@register_to_config
def __init__( self , _a = 1_000 , _a = 0.0_0085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = "linspace" , _a = 0 , ):
if trained_betas is not None:
__a = torch.tensor(_a , dtype=torch.floataa )
elif beta_schedule == "linear":
__a = torch.linspace(_a , _a , _a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__a = betas_for_alpha_bar(_a )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__a = 1.0 - self.betas
__a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_a , _a , _a )
def __UpperCAmelCase ( self , _a , _a=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(_a ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _a , _a , ):
__a = self.index_for_timestep(_a )
if self.state_in_first_order:
__a = self.sigmas[step_index]
else:
__a = self.sigmas_interpol[step_index]
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __UpperCAmelCase ( self , _a , _a = None , _a = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = torch.from_numpy(np.log(_a ) ).to(_a )
__a = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(_a ).to(device=_a )
# interpolate sigmas
__a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_a ).startswith('''mps''' ):
# mps does not support float64
__a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa )
else:
__a = torch.from_numpy(_a ).to(_a )
# interpolate timesteps
__a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype )
__a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__a = torch.cat([timesteps[:1], interleaved_timesteps] )
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(_a )
def __UpperCAmelCase ( self , _a ):
# get log sigma
__a = sigma.log()
# get distribution
__a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = self.log_sigmas[low_idx]
__a = self.log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = w.clamp(0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ):
return self.sample is None
def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ):
__a = self.index_for_timestep(_a )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas_interpol[step_index + 1]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas_interpol[step_index]
__a = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyway.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model, which requires a change in API
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_interpol - sigma_hat
# store for 2nd order step
__a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__a = sigma_next - sigma_hat
__a = self.sample
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __UpperCAmelCase ( self , _a , _a , _a , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(_a , _a ) for t in timesteps]
__a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 11 | 0 |
"""simple docstring"""
lowercase_ = {
"a": "AAAAA",
"b": "AAAAB",
"c": "AAABA",
"d": "AAABB",
"e": "AABAA",
"f": "AABAB",
"g": "AABBA",
"h": "AABBB",
"i": "ABAAA",
"j": "BBBAA",
"k": "ABAAB",
"l": "ABABA",
"m": "ABABB",
"n": "ABBAA",
"o": "ABBAB",
"p": "ABBBA",
"q": "ABBBB",
"r": "BAAAA",
"s": "BAAAB",
"t": "BAABA",
"u": "BAABB",
"v": "BBBAB",
"w": "BABAA",
"x": "BABAB",
"y": "BABBA",
"z": "BABBB",
" ": " ",
}
lowercase_ = {value: key for key, value in encode_dict.items()}
def lowercase ( lowerCAmelCase__ : str ) -> str:
    encoded = ''''''
    for letter in lowerCAmelCase__.lower():
if letter.isalpha() or letter == " ":
encoded += encode_dict[letter]
else:
raise Exception('''encode() accepts only letters of the alphabet and spaces''' )
return encoded
def lowercase ( lowerCAmelCase__ : str ) -> str:
if set(lowerCAmelCase__ ) - {"A", "B", " "} != set():
raise Exception('''decode() accepts only \'A\', \'B\' and spaces''' )
    decoded = ''''''
    for word in lowerCAmelCase__.split():
        while len(word ) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
decoded += " "
return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
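    # Illustrative decode (assumed usage; note that the second `lowercase` above
    # shadows the encoder at module scope): "AABBB" -> "h", "ABAAA" -> "i".
    assert lowercase("AABBBABAAA") == "hi"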
| 366 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'rwkv'
__UpperCAmelCase : Optional[Any] = {'max_position_embeddings': 'context_length'}
def __init__( self , _a=50_277 , _a=1_024 , _a=4_096 , _a=32 , _a=None , _a=None , _a=1E-5 , _a=0 , _a=0 , _a=6 , _a=False , _a=True , **_a , ):
__a = vocab_size
__a = context_length
__a = hidden_size
__a = num_hidden_layers
__a = attention_hidden_size if attention_hidden_size is not None else hidden_size
__a = intermediate_size if intermediate_size is not None else 4 * hidden_size
__a = layer_norm_epsilon
__a = rescale_every
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
tie_word_embeddings=_a , bos_token_id=_a , eos_token_id=_a , **_a )
| 11 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase_ = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (UnCLIPScheduler,)
def __UpperCAmelCase ( self , **_a ):
__a = {
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_a )
return config
def __UpperCAmelCase ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCAmelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_a )
def __UpperCAmelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __UpperCAmelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_a )
def __UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_a )
def __UpperCAmelCase ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_a , prev_timestep=_a )
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''fixed_small_log''' )
__a = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''learned_range''' )
__a = scheduler_class(**_a )
__a = 0.5
assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_a ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_a ) - -0.001_0011 < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
scheduler.set_timesteps(25 )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
if i + 1 == timesteps.shape[0]:
__a = None
else:
__a = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(
_a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
| 11 | 0 |
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
lowercase_ = "\\n@inproceedings{snover-etal-2006-study,\n title = \"A Study of Translation Edit Rate with Targeted Human Annotation\",\n author = \"Snover, Matthew and\n Dorr, Bonnie and\n Schwartz, Rich and\n Micciulla, Linnea and\n Makhoul, John\",\n booktitle = \"Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers\",\n month = aug # \" 8-12\",\n year = \"2006\",\n address = \"Cambridge, Massachusetts, USA\",\n publisher = \"Association for Machine Translation in the Americas\",\n url = \"https://aclanthology.org/2006.amta-papers.25\",\n pages = \"223--231\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
lowercase_ = "\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n"
lowercase_ = "\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n predictions (list of str): The system stream (a sequence of segments).\n references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n Only applies if `normalized = True`. Defaults to `False`.\n case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n 'score' (float): TER score (num_edits / sum_ref_lengths * 100)\n 'num_edits' (int): The cumulative number of edits\n 'ref_length' (float): The cumulative average reference length\n\nExamples:\n Example 1:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... [\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}\n\n Example 2:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}\n\n Example 3:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... normalized=True,\n ... case_sensitive=True)\n >>> print(results)\n {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}\n\n Example 4:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}\n\n Example 5:\n >>> predictions = [\"does this sentence match??\",\n ... \"what about this sentence?\",\n ... \"What did the TER metric user say to the developer?\"]\n >>> references = [[\"does this sentence match\", \"does this sentence match!?!\"],\n ... [\"wHaT aBoUt ThIs SeNtEnCe?\", \"wHaT aBoUt ThIs SeNtEnCe?\"],\n ... 
[\"Your jokes are...\", \"...TERrible\"]]\n >>> ter = datasets.load_metric(\"ter\")\n >>> results = ter.compute(predictions=predictions,\n ... references=references,\n ... ignore_punct=True,\n ... case_sensitive=False)\n >>> print(results)\n {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ):
raise ImportWarning(
'''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n'''
'''You can install it with `pip install "sacrebleu>=1.4.12"`.''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''http://www.cs.umd.edu/~snover/tercom/''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ),
} ) , codebase_urls=['''https://github.com/mjpost/sacreBLEU#ter'''] , reference_urls=[
'''https://github.com/jhclark/tercom''',
] , )
    def _compute(self, predictions, references, normalized: bool = False, ignore_punct: bool = False, support_zh_ja_chars: bool = False, case_sensitive: bool = False):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
| 368 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
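# Usage sketch for the helper above (the file name is hypothetical):
#   from PIL import Image
#   x = preprocess(Image.open("low_res.png"))
#   x.shape  # (1, 3, H, W) with H, W floored to multiples of 32; values in [-1, 1]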
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , ):
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , PIL.Image.Image ):
__a = 1
elif isinstance(_a , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' )
if isinstance(_a , PIL.Image.Image ):
__a = preprocess(_a )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
__a = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(_a ).sample
__a = torch.clamp(_a , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
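# Hedged end-to-end sketch (not in the original file). The class above matches the
# signature of diffusers' LDMSuperResolutionPipeline; the local class name was renamed
# in this dump, so the name and checkpoint below are assumptions:
#   pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#   upscaled = pipe(image=low_res_image, num_inference_steps=100, eta=1.0).images[0]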
| 11 | 0 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        # [1, 2] + [-2, 1] + [-1, 2] accumulates to [-2, 5]
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)
    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 369 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = DistilBertTokenizer
__UpperCAmelCase : Any = DistilBertTokenizerFast
__UpperCAmelCase : int = True
@slow
def __UpperCAmelCase ( self ):
__a = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 11 | 0 |
from __future__ import annotations

END = "#"


class Trie:
    '''simple docstring'''

    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True  # mark the end of a complete word

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
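    # Expected output sketch: every stored word extending "de", each suffix carrying
    # the sentinel space appended by _elements, in dict insertion order:
    # ('depart ', 'detergent ', 'deer ', 'deal ')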
| 370 |
"""simple docstring"""
from math import factorial, radians
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : int = 18 , lowerCAmelCase__ : int = 10 ) -> float:
__a = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0)
# Converting from degrees to radians
__a = radians(lowerCAmelCase__ )
__a = angle_in_radians
__a = 3
__a = -1
for _ in range(lowerCAmelCase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase__ )
__a = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
__import__("doctest").testmod()
| 11 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def lowercase ( lowerCAmelCase__ : Callable , lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float , lowerCAmelCase__ : float ) -> np.ndarray:
__a = int(np.ceil((x_end - xa) / step_size ) )
__a = np.zeros((n + 1,) )
__a = ya
__a = xa
for k in range(lowerCAmelCase__ ):
__a = y[k] + step_size * ode_func(lowerCAmelCase__ , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
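    # Illustrative run (not in the original file): integrate y' = y on [0, 1].
    # Explicit Euler is first-order accurate, so with h = 0.01 the endpoint lands
    # near e with an O(h) error: (1.01)**100 ~= 2.7048 vs. e ~= 2.7183.
    ys = explicit_euler(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
    print(ys[-1])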
| 371 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split the fetched bunch into feature matrix and regression target
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
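    # Representative output sketch (approximate; exact values vary with xgboost and
    # scikit-learn versions; the target is median house value in units of $100k):
    #   Mean Absolute Error : ~0.30
    #   Mean Square Error : ~0.21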
| 11 | 0 |
"""simple docstring"""
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = (EulerDiscreteScheduler,)
__UpperCAmelCase : int = 1_0
    def get_scheduler_config(self, **_a):
        config = {
'''num_train_timesteps''': 1_100,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
}
config.update(**_a )
return config
def __UpperCAmelCase ( self ):
for timesteps in [10, 50, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCAmelCase ( self ):
for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def __UpperCAmelCase ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_a )
def __UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
__a = torch.manual_seed(0 )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma
__a = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
__a = scheduler.scale_model_input(_a , _a )
__a = model(_a , _a )
__a = scheduler.step(_a , _a , _a , generator=_a )
__a = output.prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps )
__a = torch.manual_seed(0 )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma
__a = sample.to(_a )
for i, t in enumerate(scheduler.timesteps ):
__a = scheduler.scale_model_input(_a , _a )
__a = model(_a , _a )
__a = scheduler.step(_a , _a , _a , generator=_a )
__a = output.prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 0.0002 ) < 1E-2
assert abs(result_mean.item() - 2.2_676E-06 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
__a = torch.manual_seed(0 )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__a = sample.to(_a )
for t in scheduler.timesteps:
__a = scheduler.scale_model_input(_a , _a )
__a = model(_a , _a )
__a = scheduler.step(_a , _a , _a , generator=_a )
__a = output.prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 10.0807 ) < 1E-2
assert abs(result_mean.item() - 0.0131 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a , use_karras_sigmas=_a )
scheduler.set_timesteps(self.num_inference_steps , device=_a )
__a = torch.manual_seed(0 )
__a = self.dummy_model()
__a = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
__a = sample.to(_a )
for t in scheduler.timesteps:
__a = scheduler.scale_model_input(_a , _a )
__a = model(_a , _a )
__a = scheduler.step(_a , _a , _a , generator=_a )
__a = output.prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 124.52_2994_9951_1719 ) < 1E-2
assert abs(result_mean.item() - 0.1_6213_9326_3339_9963 ) < 1E-3
| 350 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
__lowerCAmelCase = None
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__lowerCAmelCase = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
},
"tokenizer_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json",
},
}
__lowerCAmelCase = {
"albert-base-v1": 5_1_2,
"albert-large-v1": 5_1_2,
"albert-xlarge-v1": 5_1_2,
"albert-xxlarge-v1": 5_1_2,
"albert-base-v2": 5_1_2,
"albert-large-v2": 5_1_2,
"albert-xlarge-v2": 5_1_2,
"albert-xxlarge-v2": 5_1_2,
}
__lowerCAmelCase = "▁"
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCAmelCase : int = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : int = AlbertTokenizer
def __init__( self , _a=None , _a=None , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , **_a , ):
# Mask token behave like a normal word, i.e. include the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__a = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
super().__init__(
_a , tokenizer_file=_a , do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , **_a , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = False if not self.vocab_file else True
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
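# Layout produced by the two helpers above (standard ALBERT single/pair convention):
#   single sequence: [CLS] A [SEP]         -> token_type_ids 0 ... 0
#   sequence pair:   [CLS] A [SEP] B [SEP] -> 0s over `[CLS] A [SEP]`, 1s over `B [SEP]`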
| 351 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 'funnel'
__UpperCAmelCase : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self , _a=30_522 , _a=[4, 4, 4] , _a=None , _a=2 , _a=768 , _a=12 , _a=64 , _a=3_072 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1E-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = block_sizes
__a = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__a = num_decoder_layers
__a = d_model
__a = n_head
__a = d_head
__a = d_inner
__a = hidden_act
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = initializer_range
__a = initializer_std
__a = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__a = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__a = attention_type
__a = separate_cls
__a = truncate_seq
__a = pool_q_only
super().__init__(**_a )
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
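# Hedged sketch of the derived properties above (`FunnelConfig` is an assumption for
# the renamed class, based on the registered 'funnel' model type):
#   config = FunnelConfig(block_sizes=[4, 4, 4])
#   config.num_hidden_layers  # 12 == sum(block_sizes); assigning raises NotImplementedError
#   config.num_blocks         # 3 == len(block_sizes)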
| 11 | 0 |
"""simple docstring"""
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
lowercase_ = logging.getLogger(__name__)
def git_log(folder_path: str):
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }
    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)
def init_gpu_params(params):
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )
def set_seed(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
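# Minimal usage sketch (argparse-style namespace assumed, as in the distillation scripts):
#   from types import SimpleNamespace
#   set_seed(SimpleNamespace(seed=56, n_gpu=0))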
| 352 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=64 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=32 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , _a=0.25 , _a=0.0 , _a=0.0 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase : Union[str, Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __UpperCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
def check_hidden_states_output(_a , _a , _a ):
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(_a ) , _a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__a = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
# verify the logits
__a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
__a = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _a )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _a )
__a = image_processor.post_process_semantic_segmentation(outputs=_a )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _a )
| 11 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
lowercase_ = logging.get_logger(__name__)
lowercase_ = OrderedDict(
[
("audio-spectrogram-transformer", "ASTFeatureExtractor"),
("beit", "BeitFeatureExtractor"),
("chinese_clip", "ChineseCLIPFeatureExtractor"),
("clap", "ClapFeatureExtractor"),
("clip", "CLIPFeatureExtractor"),
("clipseg", "ViTFeatureExtractor"),
("conditional_detr", "ConditionalDetrFeatureExtractor"),
("convnext", "ConvNextFeatureExtractor"),
("cvt", "ConvNextFeatureExtractor"),
("data2vec-audio", "Wav2Vec2FeatureExtractor"),
("data2vec-vision", "BeitFeatureExtractor"),
("deformable_detr", "DeformableDetrFeatureExtractor"),
("deit", "DeiTFeatureExtractor"),
("detr", "DetrFeatureExtractor"),
("dinat", "ViTFeatureExtractor"),
("donut-swin", "DonutFeatureExtractor"),
("dpt", "DPTFeatureExtractor"),
("encodec", "EncodecFeatureExtractor"),
("flava", "FlavaFeatureExtractor"),
("glpn", "GLPNFeatureExtractor"),
("groupvit", "CLIPFeatureExtractor"),
("hubert", "Wav2Vec2FeatureExtractor"),
("imagegpt", "ImageGPTFeatureExtractor"),
("layoutlmv2", "LayoutLMv2FeatureExtractor"),
("layoutlmv3", "LayoutLMv3FeatureExtractor"),
("levit", "LevitFeatureExtractor"),
("maskformer", "MaskFormerFeatureExtractor"),
("mctct", "MCTCTFeatureExtractor"),
("mobilenet_v1", "MobileNetV1FeatureExtractor"),
("mobilenet_v2", "MobileNetV2FeatureExtractor"),
("mobilevit", "MobileViTFeatureExtractor"),
("nat", "ViTFeatureExtractor"),
("owlvit", "OwlViTFeatureExtractor"),
("perceiver", "PerceiverFeatureExtractor"),
("poolformer", "PoolFormerFeatureExtractor"),
("regnet", "ConvNextFeatureExtractor"),
("resnet", "ConvNextFeatureExtractor"),
("segformer", "SegformerFeatureExtractor"),
("sew", "Wav2Vec2FeatureExtractor"),
("sew-d", "Wav2Vec2FeatureExtractor"),
("speech_to_text", "Speech2TextFeatureExtractor"),
("speecht5", "SpeechT5FeatureExtractor"),
("swiftformer", "ViTFeatureExtractor"),
("swin", "ViTFeatureExtractor"),
("swinv2", "ViTFeatureExtractor"),
("table-transformer", "DetrFeatureExtractor"),
("timesformer", "VideoMAEFeatureExtractor"),
("tvlt", "TvltFeatureExtractor"),
("unispeech", "Wav2Vec2FeatureExtractor"),
("unispeech-sat", "Wav2Vec2FeatureExtractor"),
("van", "ConvNextFeatureExtractor"),
("videomae", "VideoMAEFeatureExtractor"),
("vilt", "ViltFeatureExtractor"),
("vit", "ViTFeatureExtractor"),
("vit_mae", "ViTFeatureExtractor"),
("vit_msn", "ViTFeatureExtractor"),
("wav2vec2", "Wav2Vec2FeatureExtractor"),
("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
("wavlm", "Wav2Vec2FeatureExtractor"),
("whisper", "WhisperFeatureExtractor"),
("xclip", "CLIPFeatureExtractor"),
("yolos", "YolosFeatureExtractor"),
]
)
lowercase_ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(_a )
def __UpperCAmelCase ( cls , _a , **_a ):
__a = kwargs.pop('''config''' , _a )
__a = kwargs.pop('''trust_remote_code''' , _a )
__a = True
__a , __a = FeatureExtractionMixin.get_feature_extractor_dict(_a , **_a )
__a = config_dict.get('''feature_extractor_type''' , _a )
__a = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
__a = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_a , _a ):
__a = AutoConfig.from_pretrained(_a , **_a )
# It could be in `config.feature_extractor_type``
__a = getattr(_a , '''feature_extractor_type''' , _a )
if hasattr(_a , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
__a = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
__a = feature_extractor_class_from_name(_a )
__a = feature_extractor_auto_map is not None
__a = feature_extractor_class is not None or type(_a ) in FEATURE_EXTRACTOR_MAPPING
__a = resolve_trust_remote_code(
_a , _a , _a , _a )
if has_remote_code and trust_remote_code:
__a = get_class_from_dynamic_module(
_a , _a , **_a )
__a = kwargs.pop('''code_revision''' , _a )
if os.path.isdir(_a ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_a , **_a )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_a , **_a )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_a ) in FEATURE_EXTRACTOR_MAPPING:
__a = FEATURE_EXTRACTOR_MAPPING[type(_a )]
return feature_extractor_class.from_dict(_a , **_a )
raise ValueError(
f'''Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a '''
f'''`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def __UpperCAmelCase ( _a , _a ):
FEATURE_EXTRACTOR_MAPPING.register(_a , _a )
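# Usage sketch for the resolution logic above: from_pretrained checks, in order,
# `feature_extractor_type` in preprocessor_config.json, `auto_map` (remote code),
# the model config, and finally falls back to FEATURE_EXTRACTOR_MAPPING.
#   extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
#   # resolves to Wav2Vec2FeatureExtractor via the 'wav2vec2' mapping entry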
| 353 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
lowercase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
__a = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(_a )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
__a = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
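# e.g. attention_head_dim = 8 -> slice_size = 4, so attention is computed in smaller chunks to save memory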
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
__a = self.segmentation_model(**_a )
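# CLIPSeg returns per-pixel logits; sigmoid maps them to [0, 1] probabilities
# that serve directly as the inpainting mask below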
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(_a )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
| 11 | 0 |
"""simple docstring"""
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=7 , _a=True , _a=True , _a=False , _a=True , _a=99 , _a=32 , _a=5 , _a=4 , _a=37 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=16 , _a=2 , _a=0.02 , _a=3 , _a=4 , _a=None , ):
__a = parent
__a = batch_size
__a = seq_length
__a = is_training
__a = use_input_mask
__a = use_token_type_ids
__a = use_labels
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = type_vocab_size
__a = type_sequence_label_size
__a = initializer_range
__a = num_labels
__a = num_choices
__a = scope
def __UpperCAmelCase ( self ):
__a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__a = None
if self.use_input_mask:
__a = random_attention_mask([self.batch_size, self.seq_length] )
__a = None
if self.use_token_type_ids:
__a = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__a = None
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__a = ids_tensor([self.batch_size] , self.num_choices )
__a = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCAmelCase ( self ):
return BioGptConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a ):
__a = BioGptModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a )
__a = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
__a = BioGptForCausalLM(config=_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ):
__a = BioGptModel(config=_a )
model.to(_a )
model.eval()
# create attention mask
__a = torch.ones(input_ids.shape , dtype=torch.long , device=_a )
__a = self.seq_length // 2
__a = 0
# first forward pass
__a , __a = model(_a , attention_mask=_a ).to_tuple()
# create hypothetical next token and extend to next_input_ids
__a = ids_tensor((self.batch_size, 1) , config.vocab_size )
# change a random masked slice from input_ids
__a = ids_tensor((1,) , _a ).item() + 1
__a = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
__a = random_other_next_tokens
# append to next input_ids and attn_mask
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=_a )] , dim=1 , )
# get two different outputs
__a = model(_a , attention_mask=_a )['''last_hidden_state''']
__a = model(_a , past_key_values=_a , attention_mask=_a )['''last_hidden_state''']
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -1, random_slice_idx].detach()
__a = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ):
__a = BioGptModel(config=_a ).to(_a ).eval()
__a = torch.ones(input_ids.shape , dtype=torch.long , device=_a )
# first forward pass
__a = model(_a , attention_mask=_a , use_cache=_a )
__a , __a = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
__a = ids_tensor((self.batch_size, 3) , config.vocab_size )
__a = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and attention mask
__a = torch.cat([input_ids, next_tokens] , dim=-1 )
__a = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
__a = model(_a , attention_mask=_a )['''last_hidden_state''']
__a = model(_a , attention_mask=_a , past_key_values=_a )[
'''last_hidden_state'''
]
# select random slice
__a = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__a = output_from_no_past[:, -3:, random_slice_idx].detach()
__a = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_a , _a , atol=1E-3 ) )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a , _a=False ):
__a = BioGptForCausalLM(_a )
model.to(_a )
if gradient_checkpointing:
model.gradient_checkpointing_enable()
__a = model(_a , labels=_a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
result.loss.backward()
def __UpperCAmelCase ( self , _a , *_a ):
__a = BioGptModel(_a )
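# GPT-2-style scaled initialization: residual projection ("c_proj") weights are
# expected to have std = initializer_range / sqrt(2 * num_hidden_layers)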
__a = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
for key in model.state_dict().keys():
if "c_proj" in key and "weight" in key:
self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a , *_a ):
__a = self.num_labels
__a = BioGptForTokenClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , token_type_ids=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a , __a , __a , __a = config_and_inputs
__a = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (
(BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
if is_torch_available()
else ()
)
__UpperCAmelCase : List[Any] = (BioGptForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase : str = (
{
'feature-extraction': BioGptModel,
'text-classification': BioGptForSequenceClassification,
'text-generation': BioGptForCausalLM,
'token-classification': BioGptForTokenClassification,
'zero-shot': BioGptForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Dict = False
def __UpperCAmelCase ( self ):
__a = BioGptModelTester(self )
__a = ConfigTester(self , config_class=_a , hidden_size=37 )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__a = type
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_attention_mask_past(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_forward_and_backwards(*_a , gradient_checkpointing=_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_model_past_large_inputs(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_weight_initialization(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_biogpt_for_token_classification(*_a )
@slow
def __UpperCAmelCase ( self ):
__a = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_a )
__a = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__a = '''left'''
# Define PAD Token = EOS Token
__a = tokenizer.eos_token
__a = model.config.eos_token_id
# use different length sentences to test batching
__a = [
'''Hello, my dog is a little''',
'''Today, I''',
]
__a = tokenizer(_a , return_tensors='''pt''' , padding=_a )
__a = inputs['''input_ids'''].to(_a )
__a = model.generate(
input_ids=_a , attention_mask=inputs['''attention_mask'''].to(_a ) , )
__a = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(_a )
__a = model.generate(input_ids=_a )
__a = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
__a = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(_a )
__a = model.generate(input_ids=_a , max_length=model.config.max_length - num_paddings )
__a = tokenizer.batch_decode(_a , skip_special_tokens=_a )
__a = tokenizer.decode(output_non_padded[0] , skip_special_tokens=_a )
__a = tokenizer.decode(output_padded[0] , skip_special_tokens=_a )
__a = [
'''Hello, my dog is a little bit bigger than a little bit.''',
'''Today, I have a good idea of how to use the information''',
]
self.assertListEqual(_a , _a )
self.assertListEqual(_a , [non_padded_sentence, padded_sentence] )
@slow
def __UpperCAmelCase ( self ):
for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = BioGptModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = input_dict['''input_ids''']
__a = input_ids.ne(1 ).to(_a )
__a = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__a = BioGptForSequenceClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
__a = 3
__a = '''multi_label_classification'''
__a = input_dict['''input_ids''']
__a = input_ids.ne(1 ).to(_a )
__a = ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__a = BioGptForSequenceClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , attention_mask=_a , labels=_a )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
__a = torch.tensor([[2, 4_805, 9, 656, 21]] )
__a = model(_a )[0]
__a = 42_384
__a = torch.Size((1, 5, vocab_size) )
self.assertEqual(output.shape , _a )
__a = torch.tensor(
[[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
__a = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
model.to(_a )
torch.manual_seed(0 )
__a = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(_a )
__a = model.generate(
**_a , min_length=100 , max_length=1_024 , num_beams=5 , early_stopping=_a , )
__a = tokenizer.decode(output_ids[0] , skip_special_tokens=_a )
__a = (
'''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
''' more than 800,000 deaths.'''
)
self.assertEqual(_a , _a )
| 354 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_a ):
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , _a , **_a ):
return super().__call__(_a , **_a )
def __UpperCAmelCase ( self , **_a ):
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__a = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self , _a , _a=None , _a="This is a sound of {}." ):
if isinstance(_a , _a ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(_a ).content
else:
with open(_a , '''rb''' ) as f:
__a = f.read()
if isinstance(_a , _a ):
__a = ffmpeg_read(_a , self.feature_extractor.sampling_rate )
if not isinstance(_a , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
__a = candidate_labels
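# each candidate label is slotted into the hypothesis template,
# e.g. "This is a sound of {}." -> "This is a sound of dog."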
__a = [hypothesis_template.format(x ) for x in candidate_labels]
__a = self.tokenizer(_a , return_tensors=self.framework , padding=_a )
__a = [text_inputs]
return inputs
def __UpperCAmelCase ( self , _a ):
__a = model_inputs.pop('''candidate_labels''' )
__a = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _a ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**_a , **_a )
__a = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def __UpperCAmelCase ( self , _a ):
__a = model_outputs.pop('''candidate_labels''' )
__a = model_outputs['''logits'''][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
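# pair each probability with its candidate label and sort by descending score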
__a = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(_a , _a ) , key=lambda x : -x[0] )
]
return result
| 11 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase_ = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
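# Usage sketch (assumption: this module ships inside the `transformers` package):
#   from transformers import SpeechEncoderDecoderModel  # resolved lazily on first access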
| 355 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__a = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sgugger/tiny-distilbert-classification'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
# set architectures equal to `None`
__a = None
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_a , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_a , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_a , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_a , '''env.csv''' ) , multi_process=_a , )
__a = PyTorchBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''env.csv''' ) ).exists() )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_a ):
self.assertTrue(hasattr(_a , '''sequential''' ) )
self.assertTrue(hasattr(_a , '''cumulative''' ) )
self.assertTrue(hasattr(_a , '''current''' ) )
self.assertTrue(hasattr(_a , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , '''log.txt''' ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_a , '''log.txt''' ) ).exists() )
| 11 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __UpperCAmelCase ( self ):
__a = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
__a = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
__a = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
__a = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__a = model(_a )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _a , atol=1E-3 ) )
@slow
def __UpperCAmelCase ( self ):
__a = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
__a = torch.tensor([[0, 581, 10_269, 83, 99_942, 136, 60_742, 23, 70, 80_583, 18_276, 2]] )
# The dog is cute and lives in the garden house
__a = torch.Size((1, 12, 1_024) ) # batch_size, sequence_length, embedding_vector_dim
__a = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
__a = model(_a )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _a )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _a , atol=1E-3 ) )
| 356 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "spiece.model"}
lowercase_ = {
"vocab_file": {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/spiece.model",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/spiece.model",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/spiece.model",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/spiece.model",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model",
}
}
lowercase_ = {
"albert-base-v1": 5_1_2,
"albert-large-v1": 5_1_2,
"albert-xlarge-v1": 5_1_2,
"albert-xxlarge-v1": 5_1_2,
"albert-base-v2": 5_1_2,
"albert-large-v2": 5_1_2,
"albert-xlarge-v2": 5_1_2,
"albert-xxlarge-v2": 5_1_2,
}
lowercase_ = "▁"
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _a , _a=True , _a=True , _a=False , _a="[CLS]" , _a="[SEP]" , _a="<unk>" , _a="[SEP]" , _a="<pad>" , _a="[CLS]" , _a="[MASK]" , _a = None , **_a , ):
# Mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text, there should be a match in a non-normalized sentence.
__a = (
AddedToken(_a , lstrip=_a , rstrip=_a , normalized=_a )
if isinstance(_a , _a )
else mask_token
)
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_a , remove_space=_a , keep_accents=_a , bos_token=_a , eos_token=_a , unk_token=_a , sep_token=_a , pad_token=_a , cls_token=_a , mask_token=_a , sp_model_kwargs=self.sp_model_kwargs , **_a , )
__a = do_lower_case
__a = remove_space
__a = keep_accents
__a = vocab_file
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def __UpperCAmelCase ( self ):
return len(self.sp_model )
def __UpperCAmelCase ( self ):
__a = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self , _a ):
__a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self , _a ):
if self.remove_space:
__a = ''' '''.join(inputs.strip().split() )
else:
__a = inputs
__a = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
__a = unicodedata.normalize('''NFKD''' , _a )
__a = ''''''.join([c for c in outputs if not unicodedata.combining(_a )] )
if self.do_lower_case:
__a = outputs.lower()
return outputs
def __UpperCAmelCase ( self , _a ):
__a = self.preprocess_text(_a )
__a = self.sp_model.encode(_a , out_type=_a )
__a = []
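# special-case pieces like "9,": re-encode without the trailing comma so the
# digits and the comma end up as separate tokens (a SentencePiece quirk)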
for piece in pieces:
if len(_a ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
__a = self.sp_model.EncodeAsPieces(piece[:-1].replace(_a , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
__a = cur_pieces[1:]
else:
__a = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_a )
else:
new_pieces.append(_a )
return new_pieces
def __UpperCAmelCase ( self , _a ):
return self.sp_model.PieceToId(_a )
def __UpperCAmelCase ( self , _a ):
return self.sp_model.IdToPiece(_a )
def __UpperCAmelCase ( self , _a ):
__a = []
__a = ''''''
__a = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
__a = True
__a = []
else:
current_sub_tokens.append(_a )
__a = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
if token_ids_a is not None:
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1]
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __UpperCAmelCase ( self , _a , _a = None ):
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _a )
elif not os.path.isfile(self.vocab_file ):
with open(_a , '''wb''' ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 357 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a , _a ):
self.assertEqual(len(_a ) , len(_a ) )
for a, b in zip(_a , _a ):
self.assertAlmostEqual(_a , _a , delta=_a )
def __UpperCAmelCase ( self ):
__a = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_a ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
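# the accumulator keeps an elementwise running sum: [1, 2] + [-2, 1] + [-1, 2] = [-2, 5]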
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def __UpperCAmelCase ( self ):
__a = None
ops.enable_eager_execution_internal()
__a = tf.config.list_physical_devices('''CPU''' )
if len(_a ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a = tf.config.list_logical_devices(device_type='''CPU''' )
__a = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a = GradientAccumulator()
__a = tf.Variable([4.0, 3.0] )
__a , __a = create_optimizer(5E-5 , 10 , 5 )
__a = tf.Variable([0.0, 0.0] , trainable=_a )
def accumulate_on_replica(_a ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_a , _a ):
with strategy.scope():
__a = strategy.experimental_local_results(_a )
local_variables[0].assign(_a )
local_variables[1].assign(_a )
strategy.run(_a , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_a )
def _check_local_values(_a , _a ):
__a = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _a , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _a , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a = 0 ):
__a , __a = row, column
__a = [[default_value for c in range(_a )] for r in range(_a )]
def __str__( self ):
__a = f'''Matrix consists of {self.row} rows and {self.column} columns\n'''
# Make string identifier
__a = 0
for row_vector in self.array:
for obj in row_vector:
__a = max(_a , len(str(_a ) ) )
__a = f'''%{max_element_length}s'''
# Make string and return
def single_line(_a ) -> str:
nonlocal string_format_identifier
__a = '''['''
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(_a ) for row_vector in self.array )
return s
def __repr__( self ):
return str(self )
def __UpperCAmelCase ( self , _a ):
if not (isinstance(_a , (list, tuple) ) and len(_a ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self , _a ):
assert self.validate_indicies(_a )
return self.array[loc[0]][loc[1]]
def __setitem__( self , _a , _a ):
assert self.validate_indicies(_a )
__a = value
def __add__( self , _a ):
assert isinstance(_a , _a )
assert self.row == another.row and self.column == another.column
# Add
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c] + another[r, c]
return result
def __neg__( self ):
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = -self[r, c]
return result
def __sub__( self , _a ):
return self + (-another)
def __mul__( self , _a ):
if isinstance(_a , (int, float) ): # Scalar multiplication
__a = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c] * another
return result
elif isinstance(_a , _a ): # Matrix multiplication
assert self.column == another.row
__a = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
__a = f'''Unsupported type given for another ({type(_a )})'''
raise TypeError(_a )
def __UpperCAmelCase ( self ):
__a = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
__a = self[r, c]
return result
def __UpperCAmelCase ( self , _a , _a ):
assert isinstance(_a , _a ) and isinstance(_a , _a )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
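# Sherman-Morrison formula: (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# where this matrix (self) already holds A^(-1)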
# Calculate
__a = v.transpose()
__a = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertible
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def lowercase ( ) -> None:
# a^(-1)
__a = Matrix(3 , 3 , 0 )
for i in range(3 ):
__a = 1
print(f'''a^(-1) is {ainv}''' )
# u, v
__a = Matrix(3 , 1 , 0 )
__a , __a , __a = 1, 2, -3
__a = Matrix(3 , 1 , 0 )
__a , __a , __a = 4, -2, 5
print(f'''u is {u}''' )
print(f'''v is {v}''' )
print(f'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(f'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
def lowercase ( ) -> None:
import doctest
doctest.testmod()
testa()
| 358 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowercase_ = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
lowercase_ = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
lowercase_ = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
lowercase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = ['input_ids', 'attention_mask']
__UpperCAmelCase : Optional[Any] = MBartTokenizer
__UpperCAmelCase : List[int] = []
__UpperCAmelCase : List[int] = []
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , **_a , ):
# Mask token behaves like a normal word, i.e. it includes the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , **_a , )
__a = vocab_file
__a = False if not self.vocab_file else True
__a = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__a = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__a = src_lang if src_lang is not None else '''en_XX'''
__a = self.convert_tokens_to_ids(self._src_lang )
__a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _a ):
__a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a , _a , _a , **_a ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a = src_lang
__a = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
__a = self.convert_tokens_to_ids(_a )
__a = tgt_lang_id
return inputs
def __UpperCAmelCase ( self , _a , _a = "en_XX" , _a = None , _a = "ro_RO" , **_a , ):
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def __UpperCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , _a ):
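# MBART source format: no prefix tokens; the suffix [</s>, src_lang_code] is
# appended after the sequence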
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
| 11 | 0 |
"""simple docstring"""
def nor_gate ( input_1 : int , input_2 : int ) -> int:
return int(input_1 == input_2 == 0 )
def main ( ) -> None:
print('''Truth Table of NOR Gate:''' )
print('''| Input 1 | Input 2 | Output |''' )
print(f'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(f'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(f'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(f'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 359 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase_ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = load_tool('''text-question-answering''' )
self.tool.setup()
__a = load_tool('''text-question-answering''' , remote=_a )
def __UpperCAmelCase ( self ):
__a = self.tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
| 11 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase_ = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 360 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = 'timesformer'
def __init__( self , _a=224 , _a=16 , _a=3 , _a=8 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-6 , _a=True , _a="divided_space_time" , _a=0 , **_a , ):
super().__init__(**_a )
__a = image_size
__a = patch_size
__a = num_channels
__a = num_frames
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = qkv_bias
__a = attention_type
__a = drop_path_rate
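# Usage sketch (assumption: this class is exposed as `TimesformerConfig` in transformers):
#   config = TimesformerConfig(num_frames=16, attention_type="divided_space_time")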
| 361 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[list[str]] , lowerCAmelCase__ : int , ) -> None:
__a = len(lowerCAmelCase__ )
# If row is equal to the size of the board it means there is a queen in each row of
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate over each column in the row to find all possible results in each row
for col in range(lowerCAmelCase__ ):
# We apply what we learned previously. First we check that the current column
# value is not already in possible_board, because a repeated value means two
# queens share the same column (a vertical collision). Then we apply the two
# formulas we learned before:
#
# 45º: y - x = b or 45º: row - col = b
# 135º: y + x = b or 135º: row + col = b
#
# and verify that neither result already exists in its respective collision
# set (diagonal_right_collisions, diagonal_left_collisions).
#
# If any of these checks is True it means there is a collision, so we
# continue to the next value in the for loop.
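# Worked example: queens at (row=0, col=1) and (row=1, col=2) collide on a 45º
# diagonal, since 0 - 1 == 1 - 2 == -1, so -1 would already be stored in
# diagonal_right_collisions when the second queen is tried.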
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If there is no collision, we call the dfs function again with the updated inputs
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , lowerCAmelCase__ , lowerCAmelCase__ , )
def lowercase ( lowerCAmelCase__ : int ) -> None:
__a = []
depth_first_search([] , [] , [] , lowerCAmelCase__ , lowerCAmelCase__ )
# Print all the boards
for board in boards:
for column in board:
print(lowerCAmelCase__ )
print('''''' )
print(len(lowerCAmelCase__ ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 11 | 0 |
"""simple docstring"""
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self ):
__a = {}
def __UpperCAmelCase ( self ):
print(self.vertex )
for i in self.vertex:
print(i , ''' -> ''' , ''' -> '''.join([str(j ) for j in self.vertex[i]] ) )
def __UpperCAmelCase ( self , _a , _a ):
# check if from_vertex is already present
if from_vertex in self.vertex:
self.vertex[from_vertex].append(_a )
else:
# else make a new vertex
__a = [to_vertex]
def __UpperCAmelCase ( self ):
# visited array for storing already visited nodes
__a = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(_a , _a )
def __UpperCAmelCase ( self , _a , _a ):
# mark start vertex as visited
__a = True
print(_a , end=''' ''' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(_a , _a )
if __name__ == "__main__":
lowercase_ = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 362 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
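# --- Editor's illustration (not part of the original module) ---
# A minimal sketch of the lazy-import pattern used above: attribute access on the
# module object triggers the real import. `_TinyLazyModule` is an illustrative
# stand-in; the actual `_LazyModule` in transformers carries more machinery.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map every exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: module for module, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        module_name = self._symbol_to_module.get(symbol)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {symbol!r}")
        submodule = importlib.import_module(f"{self.__name__}.{module_name}")
        return getattr(submodule, symbol)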
| 11 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowercase_ = {
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'vit_mae'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=224 , _a=16 , _a=3 , _a=True , _a=16 , _a=512 , _a=8 , _a=2_048 , _a=0.75 , _a=False , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = decoder_num_attention_heads
__a = decoder_hidden_size
__a = decoder_num_hidden_layers
__a = decoder_intermediate_size
__a = mask_ratio
__a = norm_pix_loss
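# --- Editor's illustration (not part of the original config) ---
# With the defaults above (image_size=224, patch_size=16, mask_ratio=0.75), the
# MAE encoder only ever sees a quarter of the patch sequence:
num_patches = (224 // 16) ** 2                # 14 * 14 = 196 patches
num_visible = int(num_patches * (1 - 0.75))   # 49 patches are left unmasked
assert (num_patches, num_visible) == (196, 49)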
| 11 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = '''hf-internal-testing/tiny-random-t5'''
__a = AutoTokenizer.from_pretrained(_a )
__a = AutoModelForSeqaSeqLM.from_pretrained(_a )
__a = tokenizer('''This is me''' , return_tensors='''pt''' )
__a = model.to_bettertransformer()
self.assertTrue(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
__a = model.generate(**_a )
__a = model.reverse_bettertransformer()
self.assertFalse(any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
__a = AutoModelForSeqaSeqLM.from_pretrained(_a )
self.assertFalse(
any('''BetterTransformer''' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
__a = model_reloaded.generate(**_a )
self.assertTrue(torch.allclose(_a , _a ) )
def __UpperCAmelCase ( self ):
__a = '''hf-internal-testing/tiny-random-t5'''
__a = AutoModelForSeqaSeqLM.from_pretrained(_a )
__a = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(_a ):
model.save_pretrained(_a )
__a = model.reverse_bettertransformer()
model.save_pretrained(_a )
| 364 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['image_processor', 'tokenizer']
__UpperCAmelCase : str = 'LayoutLMv3ImageProcessor'
__UpperCAmelCase : Optional[int] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , _a=None , _a=None , **_a ):
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
__a = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_a , _a ):
__a = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a = features['''words''']
__a = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
__a = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
__a = images
return encoded_inputs
def __UpperCAmelCase ( self , _a , _a ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(_a )} and {len(_a )}''' )
return images_with_overflow
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def __UpperCAmelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
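# --- Editor's illustration (not part of the original processor) ---
# How the overflow mapping above works: when the tokenizer splits one document
# into several windows, each window must point back at its source image.
# The string values below are stand-ins for pixel-value tensors.
images = ["img_a", "img_b"]
overflow_to_sample_mapping = [0, 0, 1]  # sample 0 produced two windows
images_with_overflow = [images[i] for i in overflow_to_sample_mapping]
assert images_with_overflow == ["img_a", "img_a", "img_b"]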
| 11 | 0 |
"""simple docstring"""
from math import factorial, radians
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : int = 18 , lowerCAmelCase__ : int = 10 ) -> float:
__a = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
# Converting from degrees to radians
__a = radians(lowerCAmelCase__ )
__a = angle_in_radians
__a = 3
__a = -1
for _ in range(lowerCAmelCase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase__ )
__a = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
__import__("doctest").testmod()
| 365 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]=0.9_99 , lowerCAmelCase__ : List[str]="cosine" , ) -> Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCAmelCase__ : int ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCAmelCase__ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
__a = []
for i in range(lowerCAmelCase__ ):
__a = i / num_diffusion_timesteps
__a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCAmelCase__ ) / alpha_bar_fn(lowerCAmelCase__ ) , lowerCAmelCase__ ) )
return torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
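# --- Editor's illustration (not part of the original module) ---
# The cosine branch above implements beta_i = 1 - alpha_bar((i+1)/T) / alpha_bar(i/T)
# with alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2, clipped at max_beta.
# A tiny sanity check, kept inside a function so nothing runs at import time:
def _cosine_betas_sanity_check(num_steps: int = 4, max_beta: float = 0.999) -> None:
    def alpha_bar(t: float) -> float:
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    betas = [min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta) for i in range(num_steps)]
    assert all(0.0 < b <= max_beta for b in betas) and betas == sorted(betas)  # betas grow monotonically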
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : str = 2
@register_to_config
def __init__( self , _a = 1_000 , _a = 0.0_0085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = "linspace" , _a = 0 , ):
if trained_betas is not None:
__a = torch.tensor(_a , dtype=torch.floataa )
elif beta_schedule == "linear":
__a = torch.linspace(_a , _a , _a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__a = betas_for_alpha_bar(_a )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__a = 1.0 - self.betas
__a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_a , _a , _a )
def __UpperCAmelCase ( self , _a , _a=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(_a ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _a , _a , ):
__a = self.index_for_timestep(_a )
if self.state_in_first_order:
__a = self.sigmas[step_index]
else:
__a = self.sigmas_interpol[step_index]
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __UpperCAmelCase ( self , _a , _a = None , _a = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = torch.from_numpy(np.log(_a ) ).to(_a )
__a = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(_a ).to(device=_a )
# interpolate sigmas
__a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_a ).startswith('''mps''' ):
# mps does not support float64
__a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa )
else:
__a = torch.from_numpy(_a ).to(_a )
# interpolate timesteps
__a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype )
__a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__a = torch.cat([timesteps[:1], interleaved_timesteps] )
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(_a )
def __UpperCAmelCase ( self , _a ):
# get log sigma
__a = sigma.log()
# get distribution
__a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = self.log_sigmas[low_idx]
__a = self.log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = w.clamp(0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ):
return self.sample is None
def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ):
__a = self.index_for_timestep(_a )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas_interpol[step_index + 1]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas_interpol[step_index]
__a = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyway.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_interpol - sigma_hat
# store for 2nd order step
__a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__a = sigma_next - sigma_hat
__a = self.sample
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __UpperCAmelCase ( self , _a , _a , _a , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(_a , _a ) for t in timesteps]
__a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
| 11 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 'roc_bert'
def __init__( self , _a=30_522 , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=512 , _a=2 , _a=0.02 , _a=1E-12 , _a=True , _a=0 , _a="absolute" , _a=None , _a=True , _a=True , _a=768 , _a=910 , _a=512 , _a=24_858 , _a=True , **_a , ):
__a = vocab_size
__a = max_position_embeddings
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = type_vocab_size
__a = layer_norm_eps
__a = use_cache
__a = enable_pronunciation
__a = enable_shape
__a = pronunciation_embed_dim
__a = pronunciation_vocab_size
__a = shape_embed_dim
__a = shape_vocab_size
__a = concat_input
__a = position_embedding_type
__a = classifier_dropout
super().__init__(pad_token_id=_a , **_a )
| 366 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'rwkv'
__UpperCAmelCase : Optional[Any] = {'max_position_embeddings': 'context_length'}
def __init__( self , _a=50_277 , _a=1_024 , _a=4_096 , _a=32 , _a=None , _a=None , _a=1E-5 , _a=0 , _a=0 , _a=6 , _a=False , _a=True , **_a , ):
__a = vocab_size
__a = context_length
__a = hidden_size
__a = num_hidden_layers
__a = attention_hidden_size if attention_hidden_size is not None else hidden_size
__a = intermediate_size if intermediate_size is not None else 4 * hidden_size
__a = layer_norm_epsilon
__a = rescale_every
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
tie_word_embeddings=_a , bos_token_id=_a , eos_token_id=_a , **_a )
| 11 | 0 |
"""simple docstring"""
import math
import unittest
def lowercase ( lowerCAmelCase__ : int ) -> bool:
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase__ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
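# --- Editor's note (not part of the original snippet) ---
# Why the 6k +/- 1 stride works: every integer lies in a residue class mod 6, and
# classes 0, 2, 3, 4 are divisible by 2 or 3. Any prime > 3 is therefore congruent
# to 1 or 5 mod 6, so trial division only needs to test i and i + 2 per stride.
assert all(p % 6 in (1, 5) for p in [5, 7, 11, 13, 17, 19, 23, 29, 31])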
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
self.assertTrue(is_prime(2 ) )
self.assertTrue(is_prime(3 ) )
self.assertTrue(is_prime(5 ) )
self.assertTrue(is_prime(7 ) )
self.assertTrue(is_prime(11 ) )
self.assertTrue(is_prime(13 ) )
self.assertTrue(is_prime(17 ) )
self.assertTrue(is_prime(19 ) )
self.assertTrue(is_prime(23 ) )
self.assertTrue(is_prime(29 ) )
def __UpperCAmelCase ( self ):
with self.assertRaises(_a ):
is_prime(-19 )
self.assertFalse(
is_prime(0 ) , '''Zero doesn\'t have any positive factors, primes must have exactly two.''' , )
self.assertFalse(
is_prime(1 ) , '''One only has 1 positive factor, primes must have exactly two.''' , )
self.assertFalse(is_prime(2 * 2 ) )
self.assertFalse(is_prime(2 * 3 ) )
self.assertFalse(is_prime(3 * 3 ) )
self.assertFalse(is_prime(3 * 5 ) )
self.assertFalse(is_prime(3 * 5 * 7 ) )
if __name__ == "__main__":
unittest.main()
| 367 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (UnCLIPScheduler,)
def __UpperCAmelCase ( self , **_a ):
__a = {
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_a )
return config
def __UpperCAmelCase ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCAmelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_a )
def __UpperCAmelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __UpperCAmelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_a )
def __UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_a )
def __UpperCAmelCase ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_a , prev_timestep=_a )
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''fixed_small_log''' )
__a = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''learned_range''' )
__a = scheduler_class(**_a )
__a = 0.5
assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_a ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_a ) - -0.001_0011 < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
scheduler.set_timesteps(25 )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
if i + 1 == timesteps.shape[0]:
__a = None
else:
__a = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(
_a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
| 11 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 'trocr'
__UpperCAmelCase : Union[str, Any] = ['past_key_values']
__UpperCAmelCase : int = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self , _a=50_265 , _a=1_024 , _a=12 , _a=16 , _a=4_096 , _a="gelu" , _a=512 , _a=0.1 , _a=0.0 , _a=0.0 , _a=2 , _a=0.02 , _a=0.0 , _a=True , _a=False , _a=True , _a=True , _a=1 , _a=0 , _a=2 , **_a , ):
__a = vocab_size
__a = d_model
__a = decoder_layers
__a = decoder_attention_heads
__a = decoder_ffn_dim
__a = activation_function
__a = max_position_embeddings
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = init_std
__a = decoder_layerdrop
__a = use_cache
__a = scale_embedding
__a = use_learned_position_embeddings
__a = layernorm_embedding
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , decoder_start_token_id=_a , **_a , )
| 368 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowercase ( lowerCAmelCase__ : Dict ) -> Optional[int]:
__a , __a = image.size
__a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
__a = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_55.0
__a = image[None].transpose(0 , 3 , 1 , 2 )
__a = torch.from_numpy(lowerCAmelCase__ )
return 2.0 * image - 1.0
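# --- Editor's illustration (not part of the original pipeline) ---
# The final line above maps pixel intensities from [0, 1] to [-1, 1]:
assert 2.0 * (0 / 255.0) - 1.0 == -1.0
assert 2.0 * (255 / 255.0) - 1.0 == 1.0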
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , ):
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , PIL.Image.Image ):
__a = 1
elif isinstance(_a , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' )
if isinstance(_a , PIL.Image.Image ):
__a = preprocess(_a )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
__a = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(_a ).sample
__a = torch.clamp(_a , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
| 11 | 0 |
"""simple docstring"""
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowercase_ = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__UpperCAmelCase : str
__UpperCAmelCase : Optional[str] = None
__UpperCAmelCase : Optional[Union[str, int]] = None
__UpperCAmelCase : Optional[Union[str, int]] = None
__UpperCAmelCase : Optional[Union[str, int]] = None
def __UpperCAmelCase ( self ):
__a , __a , __a = _str_to_version_tuple(self.version_str )
def __repr__( self ):
return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def __UpperCAmelCase ( self ):
return self.major, self.minor, self.patch
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , _a ):
return Version(_a )
elif isinstance(_a , _a ):
return other
raise TypeError(f'''{other} (type {type(_a )}) cannot be compared to version.''' )
def __eq__( self , _a ):
try:
__a = self._validate_operand(_a )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , _a ):
__a = self._validate_operand(_a )
return self.tuple < other.tuple
def __hash__( self ):
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def __UpperCAmelCase ( cls , _a ):
__a = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def __UpperCAmelCase ( self ):
return self.version_str
def lowercase ( lowerCAmelCase__ : Optional[Any] ) -> Tuple:
__a = _VERSION_REG.match(lowerCAmelCase__ )
if not res:
raise ValueError(f'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(lowerCAmelCase__ ) for v in [res.group('''major''' ), res.group('''minor''' ), res.group('''patch''' )] )
def lowercase ( lowerCAmelCase__ : str ) -> Optional[Any]:
return ".".join(str(lowerCAmelCase__ ) for v in version_tuple )
| 369 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = DistilBertTokenizer
__UpperCAmelCase : Any = DistilBertTokenizerFast
__UpperCAmelCase : int = True
@slow
def __UpperCAmelCase ( self ):
__a = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 11 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'time_series_transformer'
__UpperCAmelCase : Tuple = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self , _a = None , _a = None , _a = "student_t" , _a = "nll" , _a = 1 , _a = [1, 2, 3, 4, 5, 6, 7] , _a = "mean" , _a = 0 , _a = 0 , _a = 0 , _a = 0 , _a = None , _a = None , _a = 32 , _a = 32 , _a = 2 , _a = 2 , _a = 2 , _a = 2 , _a = True , _a = "gelu" , _a = 64 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 0.1 , _a = 100 , _a = 0.02 , _a=True , **_a , ):
# time series specific configuration
__a = prediction_length
__a = context_length or prediction_length
__a = distribution_output
__a = loss
__a = input_size
__a = num_time_features
__a = lags_sequence
__a = scaling
__a = num_dynamic_real_features
__a = num_static_real_features
__a = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'''The cardinality should be a list of the same length as `num_static_categorical_features`''' )
__a = cardinality
else:
__a = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_a ) != num_static_categorical_features:
raise ValueError(
'''The embedding dimension should be a list of the same length as `num_static_categorical_features`''' )
__a = embedding_dimension
else:
__a = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__a = num_parallel_samples
# Transformer architecture configuration
__a = input_size * len(_a ) + self._number_of_features
__a = d_model
__a = encoder_attention_heads
__a = decoder_attention_heads
__a = encoder_ffn_dim
__a = decoder_ffn_dim
__a = encoder_layers
__a = decoder_layers
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = activation_function
__a = init_std
__a = use_cache
super().__init__(is_encoder_decoder=_a , **_a )
@property
def __UpperCAmelCase ( self ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 370 |
"""simple docstring"""
from math import factorial, radians
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : int = 18 , lowerCAmelCase__ : int = 10 ) -> float:
__a = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0)
# Converting from degrees to radians
__a = radians(lowerCAmelCase__ )
__a = angle_in_radians
__a = 3
__a = -1
for _ in range(lowerCAmelCase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase__ )
__a = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
__import__("doctest").testmod()
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : tuple[int, int] , lowerCAmelCase__ : int ) -> list[tuple[int, int]]:
__a , __a = position
__a = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
__a = []
for position in positions:
__a , __a = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(lowerCAmelCase__ )
return permissible_positions
def lowercase ( lowerCAmelCase__ : list[list[int]] ) -> bool:
return not any(elem == 0 for row in board for elem in row )
def lowercase ( lowerCAmelCase__ : list[list[int]] , lowerCAmelCase__ : tuple[int, int] , lowerCAmelCase__ : int ) -> bool:
if is_complete(lowerCAmelCase__ ):
return True
for position in get_valid_pos(lowerCAmelCase__ , len(lowerCAmelCase__ ) ):
__a , __a = position
if board[y][x] == 0:
__a = curr + 1
if open_knight_tour_helper(lowerCAmelCase__ , lowerCAmelCase__ , curr + 1 ):
return True
__a = 0
return False
def lowercase ( lowerCAmelCase__ : int ) -> list[list[int]]:
__a = [[0 for i in range(lowerCAmelCase__ )] for j in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ ):
__a = 1
if open_knight_tour_helper(lowerCAmelCase__ , (i, j) , 1 ):
return board
__a = 0
    __a = f'''Open Knight Tour cannot be performed on a board of size {n}'''
raise ValueError(lowerCAmelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
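# --- Editor's illustration (not part of the original snippet) ---
# Boundary filtering as done by the move generator above: from the corner (0, 0)
# of a 5x5 board, only two of the eight knight offsets stay in bounds.
offsets = [(1, 2), (-1, 2), (1, -2), (-1, -2), (2, 1), (2, -1), (-2, 1), (-2, -1)]
in_bounds = [(dy, dx) for dy, dx in offsets if 0 <= 0 + dy < 5 and 0 <= 0 + dx < 5]
assert sorted(in_bounds) == [(1, 2), (2, 1)]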
| 371 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowercase ( lowerCAmelCase__ : dict ) -> tuple:
return (data["data"], data["target"])
def lowercase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray ) -> np.ndarray:
__a = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCAmelCase__ , lowerCAmelCase__ )
# Predict target for test data
__a = xgb.predict(lowerCAmelCase__ )
__a = predictions.reshape(len(lowerCAmelCase__ ) , 1 )
return predictions
def lowercase ( ) -> None:
__a = fetch_california_housing()
__a , __a = data_handling(lowerCAmelCase__ )
__a , __a , __a , __a = train_test_split(
lowerCAmelCase__ , lowerCAmelCase__ , test_size=0.25 , random_state=1 )
__a = xgboost(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
print(f'''Mean Square Error : {mean_squared_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 11 | 0 |
"""simple docstring"""
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def lowercase ( lowerCAmelCase__ : int ) -> int:
__a = prime_factors(lowerCAmelCase__ )
if is_square_free(lowerCAmelCase__ ):
return -1 if len(lowerCAmelCase__ ) % 2 else 1
return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
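# --- Editor's worked examples (not part of the original snippet) ---
# The Moebius function above returns 1 for an even number of distinct prime
# factors, -1 for an odd number, and 0 when n is not square-free:
#   mu(1) = 1 (no factors), mu(2) = -1, mu(6) = 1 (2 * 3), mu(4) = 0 (2 * 2).
for factors, expected in [([], 1), ([2], -1), ([2, 3], 1), ([2, 2], 0)]:
    square_free = len(set(factors)) == len(factors)
    mu = (-1 if len(factors) % 2 else 1) if square_free else 0
    assert mu == expected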
| 350 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 0 |
"""simple docstring"""
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
__lowerCAmelCase = 5_0_0_0_0
__lowerCAmelCase = 5_0_0_0
__lowerCAmelCase , __lowerCAmelCase = os.path.split(__file__)
__lowerCAmelCase = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def lowercase ( lowerCAmelCase__ : datasets.Dataset , lowerCAmelCase__ : Any ) -> int:
for i in range(lowerCAmelCase__ ):
__a = dataset[i]
@get_duration
def lowercase ( lowerCAmelCase__ : datasets.Dataset , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]:
for i in range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ):
__a = dataset[i : i + batch_size]
@get_duration
def lowercase ( lowerCAmelCase__ : datasets.Dataset , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] ) -> Any:
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
__a = dataset[i]
@get_duration
def lowercase ( lowerCAmelCase__ : datasets.Dataset , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple ) -> List[Any]:
with dataset.formatted_as(type=lowerCAmelCase__ ):
for i in range(0 , lowerCAmelCase__ , lowerCAmelCase__ ):
__a = dataset[i : i + batch_size]
def lowercase ( ) -> Dict:
__a = {'''num examples''': SPEED_TEST_N_EXAMPLES}
__a = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''pandas''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''torch''', '''length''': SMALL_TEST}),
(read_formatted, {'''type''': '''tensorflow''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
__a = [
(read, {'''length''': SMALL_TEST}),
(read, {'''length''': SPEED_TEST_N_EXAMPLES}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 10}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 100}),
(read_batch, {'''length''': SPEED_TEST_N_EXAMPLES, '''batch_size''': 1000}),
(read_formatted, {'''type''': '''numpy''', '''length''': SMALL_TEST}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 10}),
(read_formatted_batch, {'''type''': '''numpy''', '''length''': SMALL_TEST, '''batch_size''': 1000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print('''generating dataset''' )
__a = datasets.Features(
{'''list''': datasets.Sequence(datasets.Value('''float32''' ) ), '''numbers''': datasets.Value('''float32''' )} )
__a = generate_example_dataset(
os.path.join(lowerCAmelCase__ , '''dataset.arrow''' ) , lowerCAmelCase__ , num_examples=lowerCAmelCase__ , seq_shapes={'''list''': (100,)} , )
print('''first set of iterations''' )
for func, kwargs in functions:
print(func.__name__ , str(lowerCAmelCase__ ) )
__a = func(lowerCAmelCase__ , **lowerCAmelCase__ )
print('''shuffling dataset''' )
__a = dataset.shuffle()
        print('''Second set of iterations (after shuffling)''' )
for func, kwargs in functions_shuffled:
print('''shuffled ''' , func.__name__ , str(lowerCAmelCase__ ) )
__a = func(
lowerCAmelCase__ , **lowerCAmelCase__ )
with open(lowerCAmelCase__ , '''wb''' ) as f:
f.write(json.dumps(lowerCAmelCase__ ).encode('''utf-8''' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 351 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json",
"funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json",
"funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json",
"funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json",
"funnel-transformer/intermediate": (
"https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"
),
"funnel-transformer/intermediate-base": (
"https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"
),
"funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json",
"funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json",
"funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json",
"funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = 'funnel'
__UpperCAmelCase : Union[str, Any] = {
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
}
def __init__( self , _a=30_522 , _a=[4, 4, 4] , _a=None , _a=2 , _a=768 , _a=12 , _a=64 , _a=3_072 , _a="gelu_new" , _a=0.1 , _a=0.1 , _a=0.0 , _a=0.1 , _a=None , _a=1E-9 , _a="mean" , _a="relative_shift" , _a=True , _a=True , _a=True , **_a , ):
__a = vocab_size
__a = block_sizes
__a = [1] * len(_a ) if block_repeats is None else block_repeats
assert len(_a ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__a = num_decoder_layers
__a = d_model
__a = n_head
__a = d_head
__a = d_inner
__a = hidden_act
__a = hidden_dropout
__a = attention_dropout
__a = activation_dropout
__a = initializer_range
__a = initializer_std
__a = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__a = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__a = attention_type
__a = separate_cls
__a = truncate_seq
__a = pool_q_only
super().__init__(**_a )
@property
def __UpperCAmelCase ( self ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def __UpperCAmelCase ( self ):
return len(self.block_sizes )
@num_blocks.setter
def __UpperCAmelCase ( self , _a ):
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
| 11 | 0 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__a = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a )
__a = -1
__a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
__a = model.generate(_a , max_new_tokens=10 , do_sample=_a )
__a = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__a = TextStreamer(_a )
model.generate(_a , max_new_tokens=10 , do_sample=_a , streamer=_a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__a = cs.out[:-1]
self.assertEqual(_a , _a )
def __UpperCAmelCase ( self ):
__a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__a = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a )
__a = -1
__a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
__a = model.generate(_a , max_new_tokens=10 , do_sample=_a )
__a = tokenizer.decode(greedy_ids[0] )
__a = TextIteratorStreamer(_a )
__a = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__a = Thread(target=model.generate , kwargs=_a )
thread.start()
__a = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_a , _a )
def __UpperCAmelCase ( self ):
__a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__a = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a )
__a = -1
__a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
__a = model.generate(_a , max_new_tokens=10 , do_sample=_a )
__a = greedy_ids[:, input_ids.shape[1] :]
__a = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__a = TextStreamer(_a , skip_prompt=_a )
model.generate(_a , max_new_tokens=10 , do_sample=_a , streamer=_a )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__a = cs.out[:-1]
self.assertEqual(_a , _a )
def __UpperCAmelCase ( self ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__a = AutoTokenizer.from_pretrained('''distilgpt2''' )
__a = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_a )
__a = -1
__a = torch.ones((1, 5) , device=_a ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__a = TextStreamer(_a , skip_special_tokens=_a )
model.generate(_a , max_new_tokens=1 , do_sample=_a , streamer=_a )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__a = cs.out[:-1] # Remove the final "\n"
__a = tokenizer(_a , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __UpperCAmelCase ( self ):
__a = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__a = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_a )
__a = -1
__a = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_a )
__a = TextIteratorStreamer(_a , timeout=0.001 )
__a = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__a = Thread(target=model.generate , kwargs=_a )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_a ):
__a = ''''''
for new_text in streamer:
streamer_text += new_text
| 352 |
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_a , '''width_multiplier''' ) )
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a=13 , _a=64 , _a=2 , _a=3 , _a="swish" , _a=3 , _a=32 , _a=0.1 , _a=0.02 , _a=True , _a=True , _a=10 , _a=None , _a=0.25 , _a=0.0 , _a=0.0 , ):
__a = parent
__a = batch_size
__a = image_size
__a = patch_size
__a = num_channels
__a = make_divisible(512 * width_multiplier , divisor=8 )
__a = hidden_act
__a = conv_kernel_size
__a = output_stride
__a = classifier_dropout_prob
__a = use_labels
__a = is_training
__a = num_labels
__a = initializer_range
__a = scope
__a = width_multiplier
__a = ffn_dropout
__a = attn_dropout
def __UpperCAmelCase ( self ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels, pixel_labels
def __UpperCAmelCase ( self ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = MobileViTVaModel(config=_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForImageClassification(_a )
model.to(_a )
model.eval()
__a = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCAmelCase ( self , _a , _a , _a , _a ):
__a = self.num_labels
__a = MobileViTVaForSemanticSegmentation(_a )
model.to(_a )
model.eval()
__a = model(_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__a = model(_a , labels=_a )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def __UpperCAmelCase ( self ):
__a = self.prepare_config_and_inputs()
__a , __a , __a , __a = config_and_inputs
__a = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
__UpperCAmelCase : Union[str, Any] = (
{
'feature-extraction': MobileViTVaModel,
'image-classification': MobileViTVaForImageClassification,
'image-segmentation': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : Union[str, Any] = False
__UpperCAmelCase : Tuple = False
__UpperCAmelCase : List[str] = False
def __UpperCAmelCase ( self ):
__a = MobileViTVaModelTester(self )
__a = MobileViTVaConfigTester(self , config_class=_a , has_text_modality=_a )
def __UpperCAmelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileViTV2 does not use inputs_embeds''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not support input and output embeddings''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''MobileViTV2 does not output attentions''' )
def __UpperCAmelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(reason='''Got `CUDA error: misaligned address` for tests after this one being run.''' )
def __UpperCAmelCase ( self ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(_a )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __UpperCAmelCase ( self ):
def check_hidden_states_output(_a , _a , _a ):
__a = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(_a , _a ) )
__a = outputs.hidden_states
__a = 5
self.assertEqual(len(_a ) , _a )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
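            # e.g. with the tester defaults (image_size=64, 5 stages): 32x32, 16x16, 8x8, 4x4, 2x2,
            # and the final divisor 64 // 2 matches output_stride == 32.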
__a = 2
for i in range(len(_a ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(_a , _a , _a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
def __UpperCAmelCase ( self ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_a )
@slow
def __UpperCAmelCase ( self ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = MobileViTVaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowercase ( ) -> str:
__a = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCAmelCase ( self ):
return (
MobileViTImageProcessor.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForImageClassification.from_pretrained('''apple/mobilevitv2-1.0-imagenet1k-256''' ).to(
_a )
__a = self.default_image_processor
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
# verify the logits
__a = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _a )
__a = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits
# verify the logits
__a = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , _a )
__a = torch.tensor(
[
[[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
[[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
[[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
] , device=_a , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _a , atol=1E-4 ) )
@slow
def __UpperCAmelCase ( self ):
__a = MobileViTVaForSemanticSegmentation.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = model.to(_a )
__a = MobileViTImageProcessor.from_pretrained('''shehan97/mobilevitv2-1.0-voc-deeplabv3''' )
__a = prepare_img()
__a = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
__a = model(**_a )
__a = outputs.logits.detach().cpu()
__a = image_processor.post_process_semantic_segmentation(outputs=_a , target_sizes=[(50, 60)] )
__a = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , _a )
__a = image_processor.post_process_semantic_segmentation(outputs=_a )
__a = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , _a )
| 11 | 0 |
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "nielsr/canine-s": 2_0_4_8,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1_1_1_4_1_1_2
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0xe_000
SEP = 0xe_001
BOS = 0xe_002
MASK = 0xe_003
RESERVED = 0xe_004
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS = {
    # Special symbols are represented using codepoint values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
SPECIAL_CODEPOINTS_BY_NAME = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
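# e.g. SPECIAL_CODEPOINTS_BY_NAME["[CLS]"] == CLS == 0xe_000, so chr(CLS) yields the CLS pseudo-character.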
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _a=chr(_a ) , _a=chr(_a ) , _a=chr(_a ) , _a=chr(_a ) , _a=chr(_a ) , _a=chr(_a ) , _a=False , _a=2_048 , **_a , ):
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else bos_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else eos_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else sep_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else cls_token
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , pad_token=_a , mask_token=_a , add_prefix_space=_a , model_max_length=_a , **_a , )
# Creates a mapping for looking up the IDs of special symbols.
__a = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
__a = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
__a = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
__a = UNICODE_VOCAB_SIZE
__a = len(self._special_codepoints )
@property
def __UpperCAmelCase ( self ):
return self._unicode_vocab_size
def __UpperCAmelCase ( self , _a ):
return list(_a )
def __UpperCAmelCase ( self , _a ):
try:
return ord(_a )
except TypeError:
            raise ValueError(f'''invalid token: \'{_a}\'''' )
def __UpperCAmelCase ( self , _a ):
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(_a )
except TypeError:
            raise ValueError(f'''invalid id: {_a}''' )
def __UpperCAmelCase ( self , _a ):
return "".join(_a )
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
__a = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def __UpperCAmelCase ( self , _a , _a = None , _a = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a , token_ids_a=_a , already_has_special_tokens=_a )
__a = [1] + ([0] * len(_a )) + [1]
if token_ids_a is not None:
result += ([0] * len(_a )) + [1]
return result
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
__a = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def __UpperCAmelCase ( self , _a , _a = None ):
return ()
| 353 |
"""simple docstring"""
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a , _a , _a , _a , _a , _a , ):
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
__a = (
f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'''
f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '''
                '''to update the config accordingly as leaving `steps_offset` might lead to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = 1
__a = FrozenDict(_a )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
__a = (
f'''The configuration file of this scheduler: {scheduler} has not set the configuration'''
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , _a , standard_warn=_a )
__a = dict(scheduler.config )
__a = True
__a = FrozenDict(_a )
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
                ''' that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
                ''' strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=_a , segmentation_processor=_a , vae=_a , text_encoder=_a , tokenizer=_a , unet=_a , scheduler=_a , safety_checker=_a , feature_extractor=_a , )
def __UpperCAmelCase ( self , _a = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_a )
def __UpperCAmelCase ( self ):
self.enable_attention_slicing(_a )
def __UpperCAmelCase ( self ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__a = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(_a , _a )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCAmelCase ( self ):
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_a , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self , _a , _a , _a , _a = 512 , _a = 512 , _a = 50 , _a = 7.5 , _a = None , _a = 1 , _a = 0.0 , _a = None , _a = None , _a = "pil" , _a = True , _a = None , _a = 1 , **_a , ):
__a = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
__a = self.segmentation_model(**_a )
__a = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
__a = self.numpy_to_pil(_a )[0].resize(image.size )
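        # The sigmoid of the CLIPSeg logits, rendered as a PIL image, is used directly as the inpainting
        # mask: brighter pixels mark the region described by `text`.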
# Run inpainting pipeline with the generated mask
__a = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=_a , image=_a , mask_image=_a , height=_a , width=_a , num_inference_steps=_a , guidance_scale=_a , negative_prompt=_a , num_images_per_prompt=_a , eta=_a , generator=_a , latents=_a , output_type=_a , return_dict=_a , callback=_a , callback_steps=_a , )
| 11 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = 'gptj'
__UpperCAmelCase : Dict = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , _a=50_400 , _a=2_048 , _a=4_096 , _a=28 , _a=16 , _a=64 , _a=None , _a="gelu_new" , _a=0.0 , _a=0.0 , _a=0.0 , _a=1E-5 , _a=0.02 , _a=True , _a=50_256 , _a=50_256 , _a=False , **_a , ):
__a = vocab_size
__a = n_positions
__a = n_embd
__a = n_layer
__a = n_head
__a = n_inner
__a = rotary_dim
__a = activation_function
__a = resid_pdrop
__a = embd_pdrop
__a = attn_pdrop
__a = layer_norm_epsilon
__a = initializer_range
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
bos_token_id=_a , eos_token_id=_a , tie_word_embeddings=_a , **_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a = "default" , _a = None , _a = False , ):
super().__init__(_a , task=_a , patching_specs=_a , use_past=_a )
if not getattr(self._config , '''pad_token_id''' , _a ):
# TODO: how to do that better?
__a = 0
@property
def __UpperCAmelCase ( self ):
__a = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(_a , direction='''inputs''' )
__a = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__a = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def __UpperCAmelCase ( self ):
return self._config.n_layer
@property
def __UpperCAmelCase ( self ):
return self._config.n_head
def __UpperCAmelCase ( self , _a , _a = -1 , _a = -1 , _a = False , _a = None , ):
__a = super(_a , self ).generate_dummy_inputs(
_a , batch_size=_a , seq_length=_a , is_pair=_a , framework=_a )
        # We need to order the inputs in the way they appear in forward()
__a = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__a , __a = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
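                # i.e. one zero-initialized (key, value) pair per layer, each of shape
                # (batch, num_heads, seqlen + 2, hidden_size // num_heads)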
__a = [
(torch.zeros(_a ), torch.zeros(_a )) for _ in range(self.num_layers )
]
__a = common_inputs['''attention_mask''']
if self.use_past:
__a = ordered_inputs['''attention_mask'''].dtype
__a = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_a , _a , dtype=_a )] , dim=1 )
return ordered_inputs
@property
def __UpperCAmelCase ( self ):
return 13
| 354 |
"""simple docstring"""
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
lowercase_ = logging.get_logger(__name__)
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , **_a ):
super().__init__(**_a )
if self.framework != "pt":
raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' )
# No specific FOR_XXX available yet
def __call__( self , _a , **_a ):
return super().__call__(_a , **_a )
def __UpperCAmelCase ( self , **_a ):
__a = {}
if "candidate_labels" in kwargs:
__a = kwargs['''candidate_labels''']
if "hypothesis_template" in kwargs:
__a = kwargs['''hypothesis_template''']
return preprocess_params, {}, {}
def __UpperCAmelCase ( self , _a , _a=None , _a="This is a sound of {}." ):
if isinstance(_a , _a ):
if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
__a = requests.get(_a ).content
else:
with open(_a , '''rb''' ) as f:
__a = f.read()
if isinstance(_a , _a ):
__a = ffmpeg_read(_a , self.feature_extractor.sampling_rate )
if not isinstance(_a , np.ndarray ):
raise ValueError('''We expect a numpy ndarray as input''' )
if len(audio.shape ) != 1:
raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
__a = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
__a = candidate_labels
        __a = [hypothesis_template.format(x ) for x in candidate_labels]
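        # e.g. hypothesis_template "This is a sound of {}." with label "a dog" -> "This is a sound of a dog."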
__a = self.tokenizer(_a , return_tensors=self.framework , padding=_a )
__a = [text_inputs]
return inputs
def __UpperCAmelCase ( self , _a ):
__a = model_inputs.pop('''candidate_labels''' )
__a = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , _a ):
__a = text_inputs[0]
else:
# Batching case.
__a = text_inputs[0][0]
__a = self.model(**_a , **_a )
__a = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def __UpperCAmelCase ( self , _a ):
__a = model_outputs.pop('''candidate_labels''' )
__a = model_outputs['''logits'''][0]
if self.framework == "pt":
__a = logits.softmax(dim=0 )
__a = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
__a = [
{'''score''': score, '''label''': candidate_label}
            for score, candidate_label in sorted(zip(_a , _a ) , key=lambda _a : -_a[0] )
]
return result
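# Minimal usage sketch (the checkpoint name is an assumption -- any CLAP-style zero-shot audio model works):
# from transformers import pipeline
# classifier = pipeline("zero-shot-audio-classification", model="laion/clap-htsat-unfused")
# classifier("dog_bark.wav", candidate_labels=["Sound of a dog", "Sound of vacuum cleaner"])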
| 11 | 0 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
lowercase_ = {
"microsoft/git-base": "https://huggingface.co/microsoft/git-base/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = 'git_vision_model'
def __init__( self , _a=768 , _a=3_072 , _a=12 , _a=12 , _a=3 , _a=224 , _a=16 , _a="quick_gelu" , _a=1E-5 , _a=0.0 , _a=0.02 , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = intermediate_size
__a = num_hidden_layers
__a = num_attention_heads
__a = num_channels
__a = patch_size
__a = image_size
__a = initializer_range
__a = attention_dropout
__a = layer_norm_eps
__a = hidden_act
@classmethod
def __UpperCAmelCase ( cls , _a , **_a ):
cls._set_token_in_kwargs(_a )
__a , __a = cls.get_config_dict(_a , **_a )
# get the vision config dict if we are loading from GITConfig
if config_dict.get('''model_type''' ) == "git":
__a = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_a , **_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'git'
def __init__( self , _a=None , _a=30_522 , _a=768 , _a=6 , _a=12 , _a=3_072 , _a="gelu" , _a=0.1 , _a=0.1 , _a=1_024 , _a=0.02 , _a=1E-12 , _a=0 , _a="absolute" , _a=True , _a=False , _a=101 , _a=102 , _a=None , **_a , ):
super().__init__(bos_token_id=_a , eos_token_id=_a , pad_token_id=_a , **_a )
if vision_config is None:
__a = {}
logger.info('''vision_config is None. initializing the GitVisionConfig with default values.''' )
__a = GitVisionConfig(**_a )
__a = vocab_size
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = hidden_act
__a = intermediate_size
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = max_position_embeddings
__a = initializer_range
__a = layer_norm_eps
__a = position_embedding_type
__a = use_cache
__a = tie_word_embeddings
__a = num_image_with_embedding
__a = bos_token_id
__a = eos_token_id
def __UpperCAmelCase ( self ):
__a = copy.deepcopy(self.__dict__ )
__a = self.vision_config.to_dict()
__a = self.__class__.model_type
return output
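# Minimal sketch (illustrative, untuned sizes; names follow the upstream GitVisionConfig/GitConfig classes):
# vision_config = GitVisionConfig(hidden_size=512, num_hidden_layers=6)
# config = GitConfig(vision_config=vision_config.to_dict(), num_hidden_layers=4)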
| 355 |
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss'''] ):
__a = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_a )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sgugger/tiny-distilbert-classification'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , only_pretrain_model=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , torchscript=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , fpaa=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
# set architectures equal to `None`
__a = None
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''' )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tinier_bart'''
__a = AutoConfig.from_pretrained(_a )
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_a , )
__a = PyTorchBenchmark(_a , configs=[config] )
__a = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , save_to_csv=_a , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_a , '''inf_time.csv''' ) , train_memory_csv_file=os.path.join(_a , '''train_mem.csv''' ) , inference_memory_csv_file=os.path.join(_a , '''inf_mem.csv''' ) , train_time_csv_file=os.path.join(_a , '''train_time.csv''' ) , env_info_csv_file=os.path.join(_a , '''env.csv''' ) , multi_process=_a , )
__a = PyTorchBenchmark(_a )
benchmark.run()
self.assertTrue(Path(os.path.join(_a , '''inf_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_time.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''inf_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''train_mem.csv''' ) ).exists() )
self.assertTrue(Path(os.path.join(_a , '''env.csv''' ) ).exists() )
def __UpperCAmelCase ( self ):
__a = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_a ):
self.assertTrue(hasattr(_a , '''sequential''' ) )
self.assertTrue(hasattr(_a , '''cumulative''' ) )
self.assertTrue(hasattr(_a , '''current''' ) )
self.assertTrue(hasattr(_a , '''total''' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_a , inference=_a , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_a , '''log.txt''' ) , log_print=_a , trace_memory_line_by_line=_a , multi_process=_a , )
__a = PyTorchBenchmark(_a )
__a = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_a , '''log.txt''' ) ).exists() )
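# Minimal sketch of the API exercised above (argument names mirror PyTorchBenchmarkArguments):
# args = PyTorchBenchmarkArguments(models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#                                  sequence_lengths=[8], batch_sizes=[1], multi_process=False)
# results = PyTorchBenchmark(args).run()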
| 11 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
lowercase_ = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 5_0_0_0_3
PYTHON_CODE = 5_0_0_0_2
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = PLBartTokenizer
__UpperCAmelCase : Tuple = None
__UpperCAmelCase : str = False
def __UpperCAmelCase ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
__a = PLBartTokenizer(_a , language_codes='''base''' , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __UpperCAmelCase ( self ):
__a = PLBartTokenizer(_a , language_codes='''base''' , keep_accents=_a )
__a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__a = tokenizer.vocab_size
__a = [tokenizer.convert_ids_to_tokens(_a ) for x in range(end - 4 , _a )]
self.assertListEqual(_a , ['''__java__''', '''__python__''', '''__en_XX__''', '''<mask>'''] )
__a = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__a = tokenizer(_a ).input_ids
self.assertEqual(
tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a ) , _a , )
def __UpperCAmelCase ( self ):
__a = PLBartTokenizer(_a , language_codes='''multi''' , keep_accents=_a )
__a = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__a = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
__a = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__a = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
__a = tokenizer.vocab_size
__a = [tokenizer.convert_ids_to_tokens(_a ) for x in range(end - 7 , _a )]
self.assertListEqual(
_a , ['''__java__''', '''__python__''', '''__en_XX__''', '''__javascript__''', '''__php__''', '''__ruby__''', '''__go__'''] )
__a = '''java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'''
__a = tokenizer(_a ).input_ids
self.assertEqual(
tokenizer.decode(_a , skip_special_tokens=_a , clean_up_tokenization_spaces=_a ) , _a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = 'uclanlp/plbart-python-en_XX'
__UpperCAmelCase : str = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
__UpperCAmelCase : str = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
__UpperCAmelCase : Optional[int] = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
@classmethod
def __UpperCAmelCase ( cls ):
__a = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes='''base''' , src_lang='''python''' , tgt_lang='''en_XX''' )
__a = 1
return cls
def __UpperCAmelCase ( self ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__java__'''] , 50_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__python__'''] , 50_002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''__en_XX__'''] , 50_003 )
def __UpperCAmelCase ( self ):
__a = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _a )
def __UpperCAmelCase ( self ):
self.assertIn(_a , self.tokenizer.all_special_ids )
__a = [EN_CODE, 9_037, 33_442, 57, 752, 153, 14, 56, 18, 9, 2]
__a = self.tokenizer.decode(_a , skip_special_tokens=_a )
__a = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_a )
self.assertEqual(_a , _a )
self.assertNotIn(self.tokenizer.eos_token , _a )
def __UpperCAmelCase ( self ):
__a = ['''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''' * 20]
self.assertIsInstance(src_text[0] , _a )
__a = 10
__a = self.tokenizer(_a , max_length=_a , truncation=_a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , _a )
self.assertEqual(len(_a ) , _a )
def __UpperCAmelCase ( self ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''__java__'''] ) , [50_004, 50_001] )
def __UpperCAmelCase ( self ):
__a = tempfile.mkdtemp()
__a = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_a )
__a = PLBartTokenizer.from_pretrained(_a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _a )
@require_torch
def __UpperCAmelCase ( self ):
__a = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_a , return_tensors='''pt''' )
__a = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , _a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def __UpperCAmelCase ( self ):
__a = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_a , truncation=_a , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
__a = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(_a , _a )
self.assertEqual((2, 26) , batch.input_ids.shape )
self.assertEqual((2, 26) , batch.attention_mask.shape )
__a = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def __UpperCAmelCase ( self ):
__a = self.tokenizer(self.src_text , padding=_a , truncation=_a , max_length=3 , return_tensors='''pt''' )
__a = self.tokenizer(
text_target=self.tgt_text , padding=_a , truncation=_a , max_length=10 , return_tensors='''pt''' )
__a = targets['''input_ids''']
__a = shift_tokens_right(_a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def __UpperCAmelCase ( self ):
__a = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''java''' )
self.assertEqual(
nested_simplify(_a ) , {
# A, test, EOS, en_XX
'''input_ids''': [[150, 242, 2, 50_003]],
'''attention_mask''': [[1, 1, 1, 1]],
# java
'''forced_bos_token_id''': 50_001,
} , )
| 356 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
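# Sketch of the effect: with _LazyModule, the torch-heavy submodules registered in _import_structure are
# only imported when one of their attributes (e.g. FocalNetModel) is first accessed.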
| 11 | 0 |
"""simple docstring"""
import argparse
import os
import re
lowercase_ = "src/transformers"
# Pattern that looks at the indentation in a line.
lowercase_ = re.compile(r"^(\s*)\S")
# Pattern that matches `"key":` and puts `key` in group 0.
_re_direct_key = re.compile(r"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase_ = re.compile(r"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase_ = re.compile(r"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase_ = re.compile(r"\[([^\]]+)\]")
def lowercase ( lowerCAmelCase__ : Optional[Any] ) -> Optional[int]:
__a = _re_indent.search(lowerCAmelCase__ )
return "" if search is None else search.groups()[0]
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any="" , lowerCAmelCase__ : Tuple=None , lowerCAmelCase__ : Union[str, Any]=None ) -> Optional[Any]:
__a = 0
__a = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowerCAmelCase__ ):
index += 1
__a = ['''\n'''.join(lines[:index] )]
else:
__a = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
__a = [lines[index]]
index += 1
while index < len(lowerCAmelCase__ ) and (end_prompt is None or not lines[index].startswith(lowerCAmelCase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowerCAmelCase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowerCAmelCase__ ) )
if index < len(lowerCAmelCase__ ) - 1:
__a = [lines[index + 1]]
index += 1
else:
__a = []
else:
blocks.append('''\n'''.join(lowerCAmelCase__ ) )
__a = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowerCAmelCase__ ) > 0:
blocks.append('''\n'''.join(lowerCAmelCase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowerCAmelCase__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def lowercase ( lowerCAmelCase__ : str ) -> Dict:
    def _inner(_x : List[str] ):
        return lowerCAmelCase__(_x ).lower().replace('''_''' , '''''' )
return _inner
def lowercase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : str=None ) -> int:
# If no key is provided, we use a noop.
def noop(lowerCAmelCase__ : int ):
        return lowerCAmelCase__
if key is None:
__a = noop
# Constants are all uppercase, they go first.
__a = [obj for obj in objects if key(lowerCAmelCase__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
__a = [obj for obj in objects if key(lowerCAmelCase__ )[0].isupper() and not key(lowerCAmelCase__ ).isupper()]
# Functions begin with a lowercase, they go last.
__a = [obj for obj in objects if not key(lowerCAmelCase__ )[0].isupper()]
__a = ignore_underscore(lowerCAmelCase__ )
return sorted(lowerCAmelCase__ , key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ , key=lowerCAmelCase__ ) + sorted(lowerCAmelCase__ , key=lowerCAmelCase__ )
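# e.g. sort_objects(["load_tf", "BertModel", "BERT_CONST"]) -> ["BERT_CONST", "BertModel", "load_tf"]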
def lowercase ( lowerCAmelCase__ : List[str] ) -> Tuple:
# This inner function sort imports between [ ].
def _replace(lowerCAmelCase__ : Union[str, Any] ):
        __a = lowerCAmelCase__.groups()[0]
if "," not in imports:
return f'''[{imports}]'''
__a = [part.strip().replace('''"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__a = keys[:-1]
return "[" + ", ".join([f'''"{k}"''' for k in sort_objects(lowerCAmelCase__ )] ) + "]"
__a = import_statement.split('''\n''' )
if len(lowerCAmelCase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
__a = 2 if lines[1].strip() == '''[''' else 1
        __a = [(i, _re_strip_line.search(line ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
        __a = sort_objects(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : lowerCAmelCase__[1] )
__a = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowerCAmelCase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
__a = _re_bracket_content.sub(_replace , lines[1] )
else:
__a = [part.strip().replace('''"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
__a = keys[:-1]
__a = get_indent(lines[1] ) + ''', '''.join([f'''"{k}"''' for k in sort_objects(lowerCAmelCase__ )] )
return "\n".join(lowerCAmelCase__ )
else:
# Finally we have to deal with imports fitting on one line
__a = _re_bracket_content.sub(_replace , lowerCAmelCase__ )
return import_statement
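# e.g. '"tokenization_bert": ["BertTokenizer", "BasicTokenizer"]' -> '"tokenization_bert": ["BasicTokenizer", "BertTokenizer"]'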
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple=True ) -> str:
with open(lowerCAmelCase__ , encoding='''utf-8''' ) as f:
__a = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
__a = split_code_in_indented_blocks(
lowerCAmelCase__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowerCAmelCase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
__a = main_blocks[block_idx]
__a = block.split('''\n''' )
# Get to the start of the imports.
__a = 0
while line_idx < len(lowerCAmelCase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
__a = len(lowerCAmelCase__ )
else:
line_idx += 1
if line_idx >= len(lowerCAmelCase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
__a = '''\n'''.join(block_lines[line_idx:-1] )
__a = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
__a = split_code_in_indented_blocks(lowerCAmelCase__ , indent_level=lowerCAmelCase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
__a = _re_direct_key if '''_import_structure = {''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
        __a = [(pattern.search(b ).groups()[0] if pattern.search(b ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
__a = [(i, key) for i, key in enumerate(lowerCAmelCase__ ) if key is not None]
        __a = [x[0] for x in sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : lowerCAmelCase__[1] )]
        # We reorder the blocks, leaving empty lines/comments where they were and reordering the rest.
__a = 0
__a = []
for i in range(len(lowerCAmelCase__ ) ):
if keys[i] is None:
                reordered_blocks.append(internal_blocks[i] )
else:
__a = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
                reordered_blocks.append(lowerCAmelCase__ )
count += 1
# And we put our main block back together with its first and last line.
        __a = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(lowerCAmelCase__ ):
if check_only:
return True
else:
print(f'''Overwriting {file}.''' )
with open(lowerCAmelCase__ , '''w''' , encoding='''utf-8''' ) as f:
f.write('''\n'''.join(lowerCAmelCase__ ) )
def lowercase ( lowerCAmelCase__ : Optional[int]=True ) -> Optional[int]:
__a = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
if "__init__.py" in files:
__a = sort_imports(os.path.join(lowerCAmelCase__ , '''__init__.py''' ) , check_only=lowerCAmelCase__ )
if result:
__a = [os.path.join(lowerCAmelCase__ , '''__init__.py''' )]
if len(lowerCAmelCase__ ) > 0:
raise ValueError(f'''Would overwrite {len(lowerCAmelCase__ )} files, run `make style`.''' )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
lowercase_ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
| 357 |
"""simple docstring"""
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self , _a , _a , _a ):
self.assertEqual(len(_a ) , len(_a ) )
for a, b in zip(_a , _a ):
self.assertAlmostEqual(_a , _a , delta=_a )
def __UpperCAmelCase ( self ):
__a = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(_a ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1E-2 )
def __UpperCAmelCase ( self ):
__a = None
ops.enable_eager_execution_internal()
__a = tf.config.list_physical_devices('''CPU''' )
if len(_a ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__a = tf.config.list_logical_devices(device_type='''CPU''' )
__a = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__a = GradientAccumulator()
__a = tf.Variable([4.0, 3.0] )
__a , __a = create_optimizer(5E-5 , 10 , 5 )
__a = tf.Variable([0.0, 0.0] , trainable=_a )
def accumulate_on_replica(_a ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(_a , _a ):
with strategy.scope():
__a = strategy.experimental_local_results(_a )
local_variables[0].assign(_a )
local_variables[1].assign(_a )
strategy.run(_a , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(_a )
def _check_local_values(_a , _a ):
__a = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , _a , tol=1E-2 )
self.assertListAlmostEqual(values[1].value() , _a , tol=1E-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1E-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
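# Minimal sketch of the accumulate/apply cycle exercised above:
# accumulator = GradientAccumulator()
# for grads in micro_batch_gradients:  # e.g. [[tf.constant([1.0, 2.0])], [tf.constant([-2.0, 1.0])]]
#     accumulator(grads)
# optimizer.apply_gradients(zip(accumulator.gradients, variables))
# accumulator.reset()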
| 11 | 0 |
"""simple docstring"""
from collections.abc import Callable
def lowercase ( lowerCAmelCase__ : Callable[[float], float] , lowerCAmelCase__ : float , lowerCAmelCase__ : float ) -> float:
__a = a
__a = b
    if function(lowerCAmelCase__ ) == 0: # a or b is already a root of the function
return a
elif function(lowerCAmelCase__ ) == 0:
return b
elif (
function(lowerCAmelCase__ ) * function(lowerCAmelCase__ ) > 0
    ): # if neither bound is a root and f(a) and f(b) have the same sign,
        # then bisection cannot find a root inside the interval
raise ValueError('''could not find root in given interval.''' )
else:
__a = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7: # iterate until the bracket is narrower than 10^-7
if function(lowerCAmelCase__ ) == 0:
return mid
elif function(lowerCAmelCase__ ) * function(lowerCAmelCase__ ) < 0:
__a = mid
else:
__a = mid
__a = start + (end - start) / 2.0
return mid
def lowercase ( lowerCAmelCase__ : float ) -> float:
return x**3 - 2 * x - 5
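# f(1) = -6 < 0 and f(1000) > 0, so bisection(f, 1, 1000) converges to the real root near 2.09455.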
if __name__ == "__main__":
print(bisection(f, 1, 1_0_0_0))
import doctest
doctest.testmod()
| 358 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
    MBartTokenizer = None
lowercase_ = logging.get_logger(__name__)
lowercase_ = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
lowercase_ = {
"facebook/mbart-large-en-ro": 1_0_2_4,
"facebook/mbart-large-cc25": 1_0_2_4,
}
# fmt: off
lowercase_ = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = VOCAB_FILES_NAMES
__UpperCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCAmelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase : Tuple = ['input_ids', 'attention_mask']
__UpperCAmelCase : Optional[Any] = MBartTokenizer
__UpperCAmelCase : List[int] = []
__UpperCAmelCase : List[int] = []
def __init__( self , _a=None , _a=None , _a="<s>" , _a="</s>" , _a="</s>" , _a="<s>" , _a="<unk>" , _a="<pad>" , _a="<mask>" , _a=None , _a=None , _a=None , **_a , ):
        # Mask token behaves like a normal word, i.e. it includes the space before it
__a = AddedToken(_a , lstrip=_a , rstrip=_a ) if isinstance(_a , _a ) else mask_token
super().__init__(
vocab_file=_a , tokenizer_file=_a , bos_token=_a , eos_token=_a , sep_token=_a , cls_token=_a , unk_token=_a , pad_token=_a , mask_token=_a , src_lang=_a , tgt_lang=_a , additional_special_tokens=_a , **_a , )
__a = vocab_file
        __a = bool(self.vocab_file )
__a = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'''additional_special_tokens''': _additional_special_tokens} )
__a = {
lang_code: self.convert_tokens_to_ids(_a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__a = src_lang if src_lang is not None else '''en_XX'''
__a = self.convert_tokens_to_ids(self._src_lang )
__a = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self , _a ):
__a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self , _a , _a = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self , _a , _a = None ):
__a = [self.sep_token_id]
__a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self , _a , _a , _a , _a , **_a ):
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
__a = src_lang
__a = self(_a , add_special_tokens=_a , return_tensors=_a , **_a )
__a = self.convert_tokens_to_ids(_a )
__a = tgt_lang_id
return inputs
def __UpperCAmelCase ( self , _a , _a = "en_XX" , _a = None , _a = "ro_RO" , **_a , ):
__a = src_lang
__a = tgt_lang
return super().prepare_seqaseq_batch(_a , _a , **_a )
def __UpperCAmelCase ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a ):
__a = self.convert_tokens_to_ids(_a )
__a = []
__a = [self.eos_token_id, self.cur_lang_code]
__a = self.convert_ids_to_tokens(self.prefix_tokens )
__a = self.convert_ids_to_tokens(self.suffix_tokens )
__a = processors.TemplateProcessing(
single=prefix_tokens_str + ['''$A'''] + suffix_tokens_str , pair=prefix_tokens_str + ['''$A''', '''$B'''] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self , _a , _a = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_a ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__a = os.path.join(
_a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file , _a )
return (out_vocab_file,)
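# Added: a hedged usage sketch of the tokenizer above; MBartTokenizerFast is
# the public export, and the checkpoint/language codes come from the constants
# at the top of this snippet.
from transformers import MBartTokenizerFast
tok = MBartTokenizerFast.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok("Hello world", return_tensors="pt")
# Source inputs end with </s> followed by the en_XX language code (suffix-only
# special tokens, matching set_src_lang_special_tokens above).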
| 11 | 0 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__a = get_activation('''gelu''' )
self.assertTrue(torch.allclose(gelu_python(_a ) , torch_builtin(_a ) ) )
self.assertFalse(torch.allclose(gelu_python(_a ) , gelu_new(_a ) ) )
def __UpperCAmelCase ( self ):
__a = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100] )
__a = get_activation('''gelu''' )
__a = get_activation('''gelu_10''' )
__a = torch_builtin(_a )
__a = geluaa(_a )
__a = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(_a ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def __UpperCAmelCase ( self ):
get_activation('''gelu''' )
get_activation('''gelu_10''' )
get_activation('''gelu_fast''' )
get_activation('''gelu_new''' )
get_activation('''gelu_python''' )
get_activation('''gelu_pytorch_tanh''' )
get_activation('''linear''' )
get_activation('''mish''' )
get_activation('''quick_gelu''' )
get_activation('''relu''' )
get_activation('''sigmoid''' )
get_activation('''silu''' )
get_activation('''swish''' )
get_activation('''tanh''' )
with self.assertRaises(_a ):
get_activation('''bogus''' )
with self.assertRaises(_a ):
get_activation(_a )
def __UpperCAmelCase ( self ):
__a = get_activation('''gelu''' )
__a = 1
__a = get_activation('''gelu''' )
self.assertEqual(acta.a , 1 )
with self.assertRaises(_a ):
__a = acta.a
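# Added: a small numeric sketch of the clipping behavior the second test above
# checks; get_activation("gelu_10") is GELU with outputs clamped to [-10, 10].
import torch
from transformers.activations import get_activation
x = torch.tensor([-100.0, 0.0, 5.0, 50.0])
print(get_activation("gelu_10")(x))  # the last value is clipped to 10.0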
| 359 |
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase_ = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class __lowerCAmelCase ( unittest.TestCase , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
__a = load_tool('''text-question-answering''' )
self.tool.setup()
__a = load_tool('''text-question-answering''' , remote=_a )
def __UpperCAmelCase ( self ):
__a = self.tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(_a , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
def __UpperCAmelCase ( self ):
__a = self.remote_tool(text=_a , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_a , '''launched the BigScience Research Workshop''' )
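# Added: a hedged sketch of calling the tool outside the test harness,
# mirroring the call convention asserted above. TEXT stands in for the
# article string assigned at the top of this snippet.
from transformers import load_tool
qa_tool = load_tool("text-question-answering")
answer = qa_tool(text=TEXT, question="What did Hugging Face do in April 2021?")  # TEXT: the article above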
| 11 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowercase_ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 360 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 11 | 0 |
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
lowercase_ = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n"
lowercase_ = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n"
lowercase_ = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}"
def lowercase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : bool , lowerCAmelCase__ : Optional[Dict[int, int]] = None , lowerCAmelCase__ : bool = False , ) -> Union[str, Any]:
if label_map is not None:
for old_id, new_id in label_map.items():
__a = new_id
# turn into Numpy arrays
__a = np.array(lowerCAmelCase__ )
__a = np.array(lowerCAmelCase__ )
if reduce_labels:
__a = 255
__a = label - 1
__a = 255
__a = label != ignore_index
__a = np.not_equal(lowerCAmelCase__ , lowerCAmelCase__ )
__a = pred_label[mask]
__a = np.array(lowerCAmelCase__ )[mask]
__a = pred_label[pred_label == label]
__a = np.histogram(lowerCAmelCase__ , bins=lowerCAmelCase__ , range=(0, num_labels - 1) )[0]
__a = np.histogram(lowerCAmelCase__ , bins=lowerCAmelCase__ , range=(0, num_labels - 1) )[0]
__a = np.histogram(lowerCAmelCase__ , bins=lowerCAmelCase__ , range=(0, num_labels - 1) )[0]
__a = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
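# Added: a tiny worked example of the per-class bookkeeping above
# (descriptive names; a sketch, not part of the metric's public API).
import numpy as np
pred = np.array([[0, 1], [1, 1]])
gt = np.array([[0, 1], [0, 1]])
num_labels = 2
area_intersect = np.histogram(pred[pred == gt], bins=num_labels, range=(0, num_labels - 1))[0]  # [1, 2]
area_pred = np.histogram(pred, bins=num_labels, range=(0, num_labels - 1))[0]  # [1, 3]
area_gt = np.histogram(gt, bins=num_labels, range=(0, num_labels - 1))[0]  # [2, 2]
print(area_intersect / (area_pred + area_gt - area_intersect))  # per-class IoU -> [0.5, 0.667]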
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Any , lowerCAmelCase__ : int , lowerCAmelCase__ : bool , lowerCAmelCase__ : Optional[Dict[int, int]] = None , lowerCAmelCase__ : bool = False , ) -> Dict:
__a = np.zeros((num_labels,) , dtype=np.floataa )
__a = np.zeros((num_labels,) , dtype=np.floataa )
__a = np.zeros((num_labels,) , dtype=np.floataa )
__a = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
__a , __a , __a , __a = intersect_and_union(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def lowercase ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : bool , lowerCAmelCase__ : Optional[int] = None , lowerCAmelCase__ : Optional[Dict[int, int]] = None , lowerCAmelCase__ : bool = False , ) -> List[str]:
__a , __a , __a , __a = total_intersect_and_union(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# compute metrics
__a = {}
__a = total_area_intersect.sum() / total_area_label.sum()
__a = total_area_intersect / total_area_union
__a = total_area_intersect / total_area_label
__a = np.nanmean(lowerCAmelCase__ )
__a = np.nanmean(lowerCAmelCase__ )
__a = all_acc
__a = iou
__a = acc
if nan_to_num is not None:
__a = {metric: np.nan_to_num(lowerCAmelCase__ , nan=lowerCAmelCase__ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCAmelCase ( datasets.Metric ):
'''simple docstring'''
def __UpperCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'''predictions''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
'''references''': datasets.Sequence(datasets.Sequence(datasets.Value('''uint16''' ) ) ),
} ) , reference_urls=[
'''https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'''
] , )
def __UpperCAmelCase ( self , _a , _a , _a , _a , _a = None , _a = None , _a = False , ):
__a = mean_iou(
results=_a , gt_seg_maps=_a , num_labels=_a , ignore_index=_a , nan_to_num=_a , label_map=_a , reduce_labels=_a , )
return iou_result
| 361 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[int] , lowerCAmelCase__ : list[list[str]] , lowerCAmelCase__ : int , ) -> None:
__a = len(lowerCAmelCase__ )
    # If row equals the size of the board, every row of the current board
    # (possible_board) already holds a queen
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(lowerCAmelCase__ ):
        # Apply the rules derived earlier. First check that `col` does not already
        # appear in the current board (possible_board); a repeated value would mean
        # a vertical collision. Then apply the two diagonal formulas:
        #
        # 45º:  y - x = b  or  row - col = b
        # 135º: y + x = b  or  row + col = b
        #
        # and verify that neither result is already recorded in
        # diagonal_right_collisions or diagonal_left_collisions, respectively.
        #
        # If any of these checks matches, there is a collision, so we continue to
        # the next column.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
        # No collision found: recurse with the column and both diagonals recorded
depth_first_search(
[*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , lowerCAmelCase__ , lowerCAmelCase__ , )
def lowercase ( lowerCAmelCase__ : int ) -> None:
__a = []
depth_first_search([] , [] , [] , lowerCAmelCase__ , lowerCAmelCase__ )
# Print all the boards
for board in boards:
for column in board:
print(lowerCAmelCase__ )
print('''''' )
print(len(lowerCAmelCase__ ) , '''solutions were found.''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
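# Added: a quick check of the two diagonal invariants used by the search above.
# Queens at (r1, c1) and (r2, c2) attack each other diagonally iff one holds:
def _same_diagonal(r1, c1, r2, c2):
    return r1 - c1 == r2 - c2 or r1 + c1 == r2 + c2

assert _same_diagonal(0, 0, 2, 2)  # 45º diagonal: row - col is constant
assert _same_diagonal(0, 3, 3, 0)  # 135º diagonal: row + col is constant
assert not _same_diagonal(0, 0, 1, 2)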
| 11 | 0 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = DistilBertTokenizer
__UpperCAmelCase : Any = DistilBertTokenizerFast
__UpperCAmelCase : int = True
@slow
def __UpperCAmelCase ( self ):
__a = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
| 362 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowercase_ = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 11 | 0 |
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
lowercase_ = get_logger(__name__)
lowercase_ = r"\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n"
class __lowerCAmelCase :
'''simple docstring'''
@add_start_docstrings(_a )
def __call__( self , _a , _a ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase :
'''simple docstring'''
@add_start_docstrings(_a )
def __call__( self , _a , _a ):
raise NotImplementedError(
f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@add_start_docstrings(_a )
def __call__( self , _a , _a , _a , **_a ):
for processor in self:
__a = inspect.signature(processor.__call__ ).parameters
if len(_a ) > 3:
if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
raise ValueError(
f'''Make sure that all the required parameters: {list(function_args.keys() )} for '''
f'''{processor.__class__} are passed to the logits processor.''' )
__a = processor(_a , _a , _a , **_a )
else:
__a = processor(_a , _a , _a )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
if not isinstance(_a , _a ) or not (temperature > 0):
raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' )
__a = temperature
def __call__( self , _a , _a , _a ):
__a = scores / self.temperature
return scores
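# Added: a numeric sketch of what the temperature warper above does; dividing
# logits by a temperature below 1 sharpens the softmax, above 1 flattens it.
import jax
import jax.numpy as jnp
logits = jnp.array([2.0, 1.0, 0.0])
for t in (0.5, 1.0, 2.0):
    print(t, jax.nn.softmax(logits / t))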
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a = -float('''Inf''' ) , _a = 1 ):
if not isinstance(_a , _a ) or (top_p < 0 or top_p > 1.0):
raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' )
if not isinstance(_a , _a ) or (min_tokens_to_keep < 1):
raise ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' )
__a = top_p
__a = filter_value
__a = min_tokens_to_keep
def __call__( self , _a , _a , _a ):
__a , __a = lax.top_k(_a , scores.shape[-1] )
__a = jnp.full_like(_a , self.filter_value )
__a = jax.nn.softmax(_a , axis=-1 ).cumsum(axis=-1 )
__a = cumulative_probs < self.top_p
        # also keep the first token whose cumulative probability crosses top_p
__a = jnp.roll(_a , 1 )
score_mask |= score_mask.at[:, 0].set(_a )
# min tokens to keep
__a = score_mask.at[:, : self.min_tokens_to_keep].set(_a )
__a = jnp.where(_a , _a , _a )
__a = jax.lax.sort_key_val(_a , _a )[-1]
return next_scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a = -float('''Inf''' ) , _a = 1 ):
if not isinstance(_a , _a ) or top_k <= 0:
raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' )
__a = max(_a , _a )
__a = filter_value
def __call__( self , _a , _a , _a ):
__a , __a = scores.shape
__a = jnp.full(batch_size * vocab_size , self.filter_value )
__a = min(self.top_k , scores.shape[-1] ) # Safety check
__a , __a = lax.top_k(_a , _a )
__a = jnp.broadcast_to((jnp.arange(_a ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
__a = topk_scores.flatten()
__a = topk_indices.flatten() + shift
__a = next_scores_flat.at[topk_indices_flat].set(_a )
__a = next_scores_flat.reshape(_a , _a )
return next_scores
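# Added: a plain-numpy sketch of the top-k filtering idea implemented above
# (keep the k largest scores per row, push everything else to -inf).
import numpy as np
scores = np.array([[1.0, 3.0, 0.5, 2.0]])
k = 2
kth_best = np.sort(scores, axis=-1)[:, -k][:, None]
print(np.where(scores >= kth_best, scores, -np.inf))  # [[-inf  3. -inf  2.]]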
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
__a = bos_token_id
def __call__( self , _a , _a , _a ):
__a = jnp.full(scores.shape , -float('''inf''' ) )
__a = 1 - jnp.bool_(cur_len - 1 )
__a = jnp.where(_a , new_scores.at[:, self.bos_token_id].set(0 ) , _a )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a ):
__a = max_length
__a = eos_token_id
def __call__( self , _a , _a , _a ):
__a = jnp.full(scores.shape , -float('''inf''' ) )
__a = 1 - jnp.bool_(cur_len - self.max_length + 1 )
__a = jnp.where(_a , new_scores.at[:, self.eos_token_id].set(0 ) , _a )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a ):
if not isinstance(_a , _a ) or min_length < 0:
raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' )
if not isinstance(_a , _a ) or eos_token_id < 0:
raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' )
__a = min_length
__a = eos_token_id
def __call__( self , _a , _a , _a ):
# create boolean flag to decide if min length penalty should be applied
__a = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
__a = jnp.where(_a , scores.at[:, self.eos_token_id].set(-float('''inf''' ) ) , _a )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a ):
__a = list(_a )
__a = begin_index
def __call__( self , _a , _a , _a ):
__a = 1 - jnp.bool_(cur_len - self.begin_index )
__a = jnp.where(_a , scores.at[:, self.begin_suppress_tokens].set(-float('''inf''' ) ) , _a )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
__a = list(_a )
def __call__( self , _a , _a , _a ):
__a = scores.at[..., self.suppress_tokens].set(-float('''inf''' ) )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
__a = dict(_a )
# Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
# index of the array corresponds to the index of the token to be forced, for XLA compatibility.
# Indexes without forced tokens will have a negative value.
__a = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1
for index, token in force_token_map.items():
if token is not None:
__a = force_token_array.at[index].set(_a )
__a = jnp.intaa(_a )
def __call__( self , _a , _a , _a ):
def _force_token(_a ):
__a = scores.shape[0]
__a = self.force_token_array[generation_idx]
__a = jnp.ones_like(_a , dtype=scores.dtype ) * -float('''inf''' )
__a = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
__a = lax.dynamic_update_slice(_a , _a , (0, current_token) )
return new_scores
__a = lax.cond(
cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
self.force_token_array[cur_len] >= 0 , lambda: _force_token(_a ) , lambda: scores , ) , )
return scores
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a ):
__a = generate_config.eos_token_id
__a = generate_config.no_timestamps_token_id
__a = generate_config.no_timestamps_token_id + 1
__a = decoder_input_length + 1
if generate_config.is_multilingual:
# room for language token and task token
self.begin_index += 2
if hasattr(_a , '''max_initial_timestamp_index''' ):
__a = generate_config.max_initial_timestamp_index
else:
__a = model_config.vocab_size
if self.max_initial_timestamp_index is None:
__a = model_config.vocab_size
def __call__( self , _a , _a , _a ):
# suppress <|notimestamps|> which is handled by without_timestamps
__a = scores.at[:, self.no_timestamps_token_id].set(-float('''inf''' ) )
def handle_pairs(_a , _a ):
__a = jnp.where((cur_len - self.begin_index) >= 1 , _a , _a )
__a = jnp.where(
input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , _a , )
__a = jnp.where((cur_len - self.begin_index) < 2 , _a , _a )
__a = jnp.where(
input_ids_k[cur_len - 2] >= self.timestamp_begin , _a , _a , )
return jnp.where(
_a , jnp.where(
penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('''inf''' ) ) , scores_k.at[: self.eos_token_id].set(-float('''inf''' ) ) , ) , _a , )
__a = jax.vmap(_a )(_a , _a )
__a = jnp.where(cur_len == self.begin_index , _a , _a )
__a = jnp.where(
self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , _a , )
__a = self.timestamp_begin + self.max_initial_timestamp_index
__a = jnp.where(
_a , scores.at[:, last_allowed + 1 :].set(-float('''inf''' ) ) , _a , )
# if sum of probability over timestamps is above any other token, sample timestamp
__a = jax.nn.log_softmax(_a , axis=-1 )
def handle_cumulative_probs(_a , _a ):
__a = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
__a = jnp.max(logprobs_k[: self.timestamp_begin] )
return jnp.where(
timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('''inf''' ) ) , _a , )
__a = jax.vmap(_a )(_a , _a )
return scores
| 363 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = 'vit_mae'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=224 , _a=16 , _a=3 , _a=True , _a=16 , _a=512 , _a=8 , _a=2_048 , _a=0.75 , _a=False , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = decoder_num_attention_heads
__a = decoder_hidden_size
__a = decoder_num_hidden_layers
__a = decoder_intermediate_size
__a = mask_ratio
__a = norm_pix_loss
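# Added: a hedged instantiation sketch; ViTMAEConfig/ViTMAEModel are the
# public names behind this config, and the defaults are meant to match
# facebook/vit-mae-base.
from transformers import ViTMAEConfig, ViTMAEModel
config = ViTMAEConfig(mask_ratio=0.75)
model = ViTMAEModel(config)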
| 11 | 0 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a = None , _a = None , _a = None , _a = False , _a = False , _a = None , _a = None , **_a , ):
super().__init__(
_a , split=_a , features=_a , cache_dir=_a , keep_in_memory=_a , streaming=_a , num_proc=_a , **_a , )
__a = field
__a = path_or_paths if isinstance(_a , _a ) else {self.split: path_or_paths}
__a = Json(
cache_dir=_a , data_files=_a , features=_a , field=_a , **_a , )
def __UpperCAmelCase ( self ):
# Build iterable dataset
if self.streaming:
__a = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__a = None
__a = None
__a = None
__a = None
self.builder.download_and_prepare(
download_config=_a , download_mode=_a , verification_mode=_a , base_path=_a , num_proc=self.num_proc , )
__a = self.builder.as_dataset(
split=self.split , verification_mode=_a , in_memory=self.keep_in_memory )
return dataset
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a = None , _a = None , **_a , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
__a = dataset
__a = path_or_buf
__a = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__a = num_proc
__a = '''utf-8'''
__a = to_json_kwargs
def __UpperCAmelCase ( self ):
__a = self.to_json_kwargs.pop('''path_or_buf''' , _a )
__a = self.to_json_kwargs.pop('''orient''' , '''records''' )
__a = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
__a = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
__a = self.to_json_kwargs.pop('''compression''' , _a )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=_a ) as buffer:
__a = self._write(file_obj=_a , orient=_a , lines=_a , index=_a , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
''' was passed. Please provide a local path instead.''' )
__a = self._write(
file_obj=self.path_or_buf , orient=_a , lines=_a , index=_a , **self.to_json_kwargs )
return written
def __UpperCAmelCase ( self , _a ):
__a , __a , __a , __a , __a = args
__a = query_table(
table=self.dataset.data , key=slice(_a , offset + self.batch_size ) , indices=self.dataset._indices , )
__a = batch.to_pandas().to_json(
path_or_buf=_a , orient=_a , lines=_a , index=_a , **_a )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def __UpperCAmelCase ( self , _a , _a , _a , _a , **_a , ):
__a = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
__a = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_a )
else:
__a , __a = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _a , _a )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(_a )
return written
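# Added: a hedged sketch of the public entry points that wrap the reader and
# writer classes above (Dataset.from_json / Dataset.to_json).
from datasets import Dataset
ds = Dataset.from_dict({"a": [1, 2, 3]})
ds.to_json("out.jsonl", lines=True)   # goes through the JSON writer above
ds2 = Dataset.from_json("out.jsonl")  # goes through the JSON reader above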
| 364 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : str = ['image_processor', 'tokenizer']
__UpperCAmelCase : str = 'LayoutLMv3ImageProcessor'
__UpperCAmelCase : Optional[int] = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self , _a=None , _a=None , **_a ):
__a = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _a , )
__a = kwargs.pop('''feature_extractor''' )
__a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_a , _a )
def __call__( self , _a , _a = None , _a = None , _a = None , _a = None , _a = True , _a = False , _a = None , _a = None , _a = 0 , _a = None , _a = None , _a = None , _a = False , _a = False , _a = False , _a = False , _a = True , _a = None , **_a , ):
# verify input
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
# first, apply the image processor
__a = self.image_processor(images=_a , return_tensors=_a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(_a , _a ):
__a = [text] # add batch dimension (as the image processor always adds a batch dimension)
__a = features['''words''']
__a = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=_a , add_special_tokens=_a , padding=_a , truncation=_a , max_length=_a , stride=_a , pad_to_multiple_of=_a , return_token_type_ids=_a , return_attention_mask=_a , return_overflowing_tokens=_a , return_special_tokens_mask=_a , return_offsets_mapping=_a , return_length=_a , verbose=_a , return_tensors=_a , **_a , )
# add pixel values
__a = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__a = self.get_overflowing_images(_a , encoded_inputs['''overflow_to_sample_mapping'''] )
__a = images
return encoded_inputs
def __UpperCAmelCase ( self , _a , _a ):
# in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
__a = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(_a ) != len(_a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f''' {len(_a )} and {len(_a )}''' )
return images_with_overflow
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.batch_decode(*_a , **_a )
def __UpperCAmelCase ( self , *_a , **_a ):
return self.tokenizer.decode(*_a , **_a )
@property
def __UpperCAmelCase ( self ):
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _a , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self ):
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , _a , )
return self.image_processor
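# Added: a hedged usage sketch of the processor above. With apply_ocr=True
# (the image processor default) words and boxes come from OCR, so only an
# image is passed; `image` is a placeholder PIL.Image here.
from transformers import LayoutLMv3Processor
processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
encoding = processor(image, return_tensors="pt")  # image: a PIL.Image document scan
print(encoding.keys())  # input_ids, bbox, attention_mask, pixel_values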
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations
def lowercase ( lowerCAmelCase__ : list[int] , lowerCAmelCase__ : int ) -> int:
if len(lowerCAmelCase__ ) < k or k < 0:
raise ValueError('''Invalid Input''' )
__a = __a = sum(array[:k] )
for i in range(len(lowerCAmelCase__ ) - k ):
__a = current_sum - array[i] + array[i + k]
__a = max(lowerCAmelCase__ , lowerCAmelCase__ )
return max_sum
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
lowercase_ = [randint(-1_0_0_0, 1_0_0_0) for i in range(1_0_0)]
lowercase_ = randint(0, 1_1_0)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
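# Added: the same sliding-window update with the descriptive names the code
# above implies (current_sum / max_sum), plus a worked trace: windows of
# [1, 4, 2, 10, 2] with k=3 sum to 7 -> 16 -> 14, so the answer is 16.
def _max_sum_demo(array, k):
    current_sum = max_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum

assert _max_sum_demo([1, 4, 2, 10, 2], 3) == 16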
| 365 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]=0.9_99 , lowerCAmelCase__ : List[str]="cosine" , ) -> Optional[int]:
if alpha_transform_type == "cosine":
def alpha_bar_fn(lowerCAmelCase__ : int ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(lowerCAmelCase__ : Optional[Any] ):
return math.exp(t * -12.0 )
else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
__a = []
for i in range(lowerCAmelCase__ ):
__a = i / num_diffusion_timesteps
__a = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(lowerCAmelCase__ ) / alpha_bar_fn(lowerCAmelCase__ ) , lowerCAmelCase__ ) )
return torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
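# Added: a quick numeric check of the cosine schedule built above; betas start
# tiny and grow toward the 0.999 cap as t approaches 1 (a sketch, not library code).
import math
def _alpha_bar(t):
    return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
n = 1_000
betas = [min(1 - _alpha_bar((i + 1) / n) / _alpha_bar(i / n), 0.999) for i in range(n)]
print(betas[0], betas[-1])  # ~4e-5 at the start, 0.999 at the end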
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers]
__UpperCAmelCase : str = 2
@register_to_config
def __init__( self , _a = 1_000 , _a = 0.0_0085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = "linspace" , _a = 0 , ):
if trained_betas is not None:
__a = torch.tensor(_a , dtype=torch.floataa )
elif beta_schedule == "linear":
__a = torch.linspace(_a , _a , _a , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__a = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__a = betas_for_alpha_bar(_a )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
__a = 1.0 - self.betas
__a = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(_a , _a , _a )
def __UpperCAmelCase ( self , _a , _a=None ):
if schedule_timesteps is None:
__a = self.timesteps
__a = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
__a = 1 if len(_a ) > 1 else 0
else:
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
__a = self._index_counter[timestep_int]
return indices[pos].item()
@property
def __UpperCAmelCase ( self ):
# standard deviation of the initial noise distribution
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def __UpperCAmelCase ( self , _a , _a , ):
__a = self.index_for_timestep(_a )
if self.state_in_first_order:
__a = self.sigmas[step_index]
else:
__a = self.sigmas_interpol[step_index]
__a = sample / ((sigma**2 + 1) ** 0.5)
return sample
def __UpperCAmelCase ( self , _a , _a = None , _a = None , ):
__a = num_inference_steps
__a = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
__a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy()
elif self.config.timestep_spacing == "leading":
__a = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
__a = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
__a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a )
timesteps -= 1
else:
raise ValueError(
f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' )
__a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
__a = torch.from_numpy(np.log(_a ) ).to(_a )
__a = np.interp(_a , np.arange(0 , len(_a ) ) , _a )
__a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
__a = torch.from_numpy(_a ).to(device=_a )
# interpolate sigmas
__a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp()
__a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] )
__a = torch.cat(
[sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] )
if str(_a ).startswith('''mps''' ):
# mps does not support float64
__a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa )
else:
__a = torch.from_numpy(_a ).to(_a )
# interpolate timesteps
__a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype )
__a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten()
__a = torch.cat([timesteps[:1], interleaved_timesteps] )
__a = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
__a = defaultdict(_a )
def __UpperCAmelCase ( self , _a ):
# get log sigma
__a = sigma.log()
# get distribution
__a = log_sigma - self.log_sigmas[:, None]
# get sigmas range
__a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 )
__a = low_idx + 1
__a = self.log_sigmas[low_idx]
__a = self.log_sigmas[high_idx]
# interpolate sigmas
__a = (low - log_sigma) / (low - high)
__a = w.clamp(0 , 1 )
# transform interpolation to time range
__a = (1 - w) * low_idx + w * high_idx
__a = t.view(sigma.shape )
return t
@property
def __UpperCAmelCase ( self ):
return self.sample is None
def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ):
__a = self.index_for_timestep(_a )
# advance index counter by 1
__a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
__a = self.sigmas[step_index]
__a = self.sigmas_interpol[step_index + 1]
__a = self.sigmas[step_index + 1]
else:
# 2nd order / KDPM2's method
__a = self.sigmas[step_index - 1]
__a = self.sigmas_interpol[step_index]
__a = self.sigmas[step_index]
        # currently only gamma=0 is supported. This usually works best anyway.
        # We could support gamma in the future, but that would require scaling the
        # timestep before passing it to the model, which means an API change.
__a = 0
__a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
__a = sigma_hat if self.state_in_first_order else sigma_interpol
__a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
raise NotImplementedError('''prediction_type not implemented yet: sample''' )
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
__a = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
__a = sigma_interpol - sigma_hat
# store for 2nd order step
__a = sample
else:
# DPM-Solver-2
# 2. Convert to an ODE derivative for 2nd order
__a = (sample - pred_original_sample) / sigma_interpol
# 3. delta timestep
__a = sigma_next - sigma_hat
__a = self.sample
__a = None
__a = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_a )
def __UpperCAmelCase ( self , _a , _a , _a , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
__a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(_a ):
# mps does not support float64
__a = self.timesteps.to(original_samples.device , dtype=torch.floataa )
__a = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
__a = self.timesteps.to(original_samples.device )
__a = timesteps.to(original_samples.device )
__a = [self.index_for_timestep(_a , _a ) for t in timesteps]
__a = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
__a = sigma.unsqueeze(-1 )
__a = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
return self.config.num_train_timesteps
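# Added: a hedged usage sketch; this scheduler is published in diffusers as
# KDPM2DiscreteScheduler (the "DPM-Solver-2" comments above match that class).
from diffusers import KDPM2DiscreteScheduler
scheduler = KDPM2DiscreteScheduler(beta_schedule="scaled_linear")
scheduler.set_timesteps(25)
print(scheduler.timesteps[:4])  # interleaved first-/second-order timesteps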
| 11 | 0 |
"""simple docstring"""
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowercase_ = TypeVar("T")
def lowercase ( lowerCAmelCase__ : int ) -> int:
return (position - 1) // 2
def lowercase ( lowerCAmelCase__ : int ) -> int:
return (2 * position) + 1
def lowercase ( lowerCAmelCase__ : int ) -> int:
return (2 * position) + 2
class __lowerCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
__a = []
__a = {}
__a = 0
def __len__( self ):
return self.elements
def __repr__( self ):
return str(self.heap )
def __UpperCAmelCase ( self ):
# Check if the priority queue is empty
return self.elements == 0
def __UpperCAmelCase ( self , _a , _a ):
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
__a = self.elements
self.elements += 1
self._bubble_up(_a )
def __UpperCAmelCase ( self ):
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
__a , __a = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__a , __a = self.heap[0]
self._bubble_down(_a )
return elem
def __UpperCAmelCase ( self , _a , _a ):
# Update the weight of the given key
__a = self.position_map[elem]
__a = (elem, weight)
if position > 0:
__a = get_parent_position(_a )
__a , __a = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_a )
else:
self._bubble_down(_a )
else:
self._bubble_down(_a )
def __UpperCAmelCase ( self , _a ):
# Place a node at the proper position (upward movement) [to be used internally
# only]
__a = self.position_map[elem]
if curr_pos == 0:
return None
__a = get_parent_position(_a )
__a , __a = self.heap[curr_pos]
__a , __a = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_a , _a )
return self._bubble_up(_a )
return None
def __UpperCAmelCase ( self , _a ):
# Place a node at the proper position (downward movement) [to be used
# internally only]
__a = self.position_map[elem]
__a , __a = self.heap[curr_pos]
__a = get_child_left_position(_a )
__a = get_child_right_position(_a )
if child_left_position < self.elements and child_right_position < self.elements:
__a , __a = self.heap[child_left_position]
__a , __a = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_a , _a )
return self._bubble_down(_a )
if child_left_position < self.elements:
__a , __a = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_a , _a )
return self._bubble_down(_a )
else:
return None
if child_right_position < self.elements:
__a , __a = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_a , _a )
return self._bubble_down(_a )
return None
def __UpperCAmelCase ( self , _a , _a ):
# Swap the nodes at the given positions
__a = self.heap[nodea_pos][0]
__a = self.heap[nodea_pos][0]
__a , __a = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__a = nodea_pos
__a = nodea_pos
class __lowerCAmelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
__a = {}
__a = 0
def __repr__( self ):
return str(self.connections )
def __len__( self ):
return self.nodes
def __UpperCAmelCase ( self , _a ):
# Add a node in the graph if it is not in the graph
if node not in self.connections:
__a = {}
self.nodes += 1
def __UpperCAmelCase ( self , _a , _a , _a ):
# Add an edge between 2 nodes in the graph
self.add_node(_a )
self.add_node(_a )
__a = weight
__a = weight
def lowercase ( lowerCAmelCase__ : GraphUndirectedWeighted[T] , ) -> tuple[dict[T, int], dict[T, T | None]]:
__a = {node: maxsize for node in graph.connections}
__a = {node: None for node in graph.connections}
__a = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCAmelCase__ , lowerCAmelCase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
__a = priority_queue.extract_min()
__a = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__a = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
__a = node
# running prim's algorithm
while not priority_queue.is_empty():
__a = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__a = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCAmelCase__ , dist[neighbour] )
__a = node
return dist, parent
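# Added: a hedged usage sketch with the pre-obfuscation names suggested by the
# type hints above; `GraphUndirectedWeighted` appears in the signature, while
# `prims_algo` is an assumed name for the function defined last.
graph = GraphUndirectedWeighted()
graph.add_edge("a", "b", 3)
graph.add_edge("b", "c", 10)
graph.add_edge("a", "c", 15)
dist, parent = prims_algo(graph)
print(parent)  # MST edges: {'a': None, 'b': 'a', 'c': 'b'}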
| 366 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Any = 'rwkv'
__UpperCAmelCase : Optional[Any] = {'max_position_embeddings': 'context_length'}
def __init__( self , _a=50_277 , _a=1_024 , _a=4_096 , _a=32 , _a=None , _a=None , _a=1E-5 , _a=0 , _a=0 , _a=6 , _a=False , _a=True , **_a , ):
__a = vocab_size
__a = context_length
__a = hidden_size
__a = num_hidden_layers
__a = attention_hidden_size if attention_hidden_size is not None else hidden_size
__a = intermediate_size if intermediate_size is not None else 4 * hidden_size
__a = layer_norm_epsilon
__a = rescale_every
__a = use_cache
__a = bos_token_id
__a = eos_token_id
super().__init__(
tie_word_embeddings=_a , bos_token_id=_a , eos_token_id=_a , **_a )
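# Minimal sketch of the derived defaults in the config above, standalone so it
# does not need transformers installed: attention_hidden_size falls back to
# hidden_size, and intermediate_size to 4 * hidden_size, when left as None.
def resolve_rwkv_sizes(hidden_size=4_096, attention_hidden_size=None, intermediate_size=None):
    attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
    intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
    return attention_hidden_size, intermediate_size


assert resolve_rwkv_sizes(1_024) == (1_024, 4_096)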
| 11 | 0 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
lowercase_ = logging.getLogger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = 'sequence-classification'
def __init__( self , _a ):
if type(_a ) == dict:
__a = Namespace(**_a )
__a = glue_output_modes[hparams.task]
__a = glue_tasks_num_labels[hparams.task]
super().__init__(_a , _a , self.mode )
def __UpperCAmelCase ( self , **_a ):
return self.model(**_a )
def __UpperCAmelCase ( self , _a , _a ):
__a = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__a = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
__a = self(**_a )
__a = outputs[0]
__a = self.trainer.lr_schedulers[0]['''scheduler''']
__a = {'''loss''': loss, '''rate''': lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def __UpperCAmelCase ( self ):
__a = self.hparams
__a = processors[args.task]()
__a = processor.get_labels()
for mode in ["train", "dev"]:
__a = self._feature_file(_a )
if os.path.exists(_a ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , _a )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
__a = (
processor.get_dev_examples(args.data_dir )
if mode == '''dev'''
else processor.get_train_examples(args.data_dir )
)
__a = convert_examples_to_features(
_a , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info('''Saving features into cached file %s''' , _a )
torch.save(_a , _a )
def __UpperCAmelCase ( self , _a , _a , _a = False ):
__a = '''dev''' if mode == '''test''' else mode
__a = self._feature_file(_a )
logger.info('''Loading features from cached file %s''' , _a )
__a = torch.load(_a )
__a = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
__a = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
__a = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
__a = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
__a = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(_a , _a , _a , _a ) , batch_size=_a , shuffle=_a , )
def __UpperCAmelCase ( self , _a , _a ):
__a = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
__a = batch[2] if self.config.model_type in ['''bert''', '''xlnet''', '''albert'''] else None
__a = self(**_a )
__a , __a = outputs[:2]
__a = logits.detach().cpu().numpy()
__a = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def __UpperCAmelCase ( self , _a ):
__a = torch.stack([x['''val_loss'''] for x in outputs] ).mean().detach().cpu().item()
__a = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
__a = np.argmax(_a , axis=1 )
elif self.hparams.glue_output_mode == "regression":
__a = np.squeeze(_a )
__a = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
__a = [[] for _ in range(out_label_ids.shape[0] )]
__a = [[] for _ in range(out_label_ids.shape[0] )]
__a = {**{'''val_loss''': val_loss_mean}, **compute_metrics(self.hparams.task , _a , _a )}
__a = dict(results.items() )
__a = results
return ret, preds_list, out_label_list
def __UpperCAmelCase ( self , _a ):
__a , __a , __a = self._eval_end(_a )
__a = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def __UpperCAmelCase ( self , _a ):
__a , __a , __a = self._eval_end(_a )
__a = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def __UpperCAmelCase ( _a , _a ):
BaseTransformer.add_model_specific_args(_a , _a )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=_a , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--task''' , default='''''' , type=_a , required=_a , help='''The GLUE task to run''' , )
parser.add_argument(
'''--gpus''' , default=0 , type=_a , help='''The number of GPUs allocated for this, it is by default 0 meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
def lowercase ( ) -> List[str]:
__a = argparse.ArgumentParser()
add_generic_args(lowerCAmelCase__ , os.getcwd() )
__a = GLUETransformer.add_model_specific_args(lowerCAmelCase__ , os.getcwd() )
__a = parser.parse_args()
# If output_dir not provided, a folder will be generated in pwd
if args.output_dir is None:
__a = os.path.join(
'''./results''' , f'''{args.task}_{time.strftime('%Y%m%d_%H%M%S' )}''' , )
os.makedirs(args.output_dir )
__a = GLUETransformer(lowerCAmelCase__ )
__a = generic_train(lowerCAmelCase__ , lowerCAmelCase__ )
# Optionally, predict on dev set and write to output_dir
if args.do_predict:
__a = sorted(glob.glob(os.path.join(args.output_dir , '''checkpoint-epoch=*.ckpt''' ) , recursive=lowerCAmelCase__ ) )
__a = model.load_from_checkpoint(checkpoints[-1] )
return trainer.test(lowerCAmelCase__ )
if __name__ == "__main__":
main()
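# Sketch of the cache-or-build pattern the dataset preparation above follows:
# reuse the cached feature file when it exists, otherwise build the features
# and save them. `build_features` and `cache_path` are placeholders here.
import os

import torch


def load_or_build(cache_path, build_features, overwrite_cache=False):
    if os.path.exists(cache_path) and not overwrite_cache:
        return torch.load(cache_path)
    features = build_features()
    torch.save(features, cache_path)
    return features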
| 367 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = (UnCLIPScheduler,)
def __UpperCAmelCase ( self , **_a ):
__a = {
'''num_train_timesteps''': 1_000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**_a )
return config
def __UpperCAmelCase ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __UpperCAmelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_a )
def __UpperCAmelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __UpperCAmelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_a )
def __UpperCAmelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_a )
def __UpperCAmelCase ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_a , prev_timestep=_a )
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''fixed_small_log''' )
__a = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0_000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.999_4987 ) ) < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config(variance_type='''learned_range''' )
__a = scheduler_class(**_a )
__a = 0.5
assert scheduler._get_variance(1 , predicted_variance=_a ) - -10.171_2790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=_a ) - -5.799_8052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=_a ) - -0.001_0011 < 1E-5
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __UpperCAmelCase ( self ):
__a = self.scheduler_classes[0]
__a = self.get_scheduler_config()
__a = scheduler_class(**_a )
scheduler.set_timesteps(25 )
__a = scheduler.timesteps
__a = self.dummy_model()
__a = self.dummy_sample_deter
__a = torch.manual_seed(0 )
for i, t in enumerate(_a ):
# 1. predict noise residual
__a = model(_a , _a )
if i + 1 == timesteps.shape[0]:
__a = None
else:
__a = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
__a = scheduler.step(
_a , _a , _a , prev_timestep=_a , generator=_a ).prev_sample
__a = pred_prev_sample
__a = torch.sum(torch.abs(_a ) )
__a = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __UpperCAmelCase ( self ):
pass
def __UpperCAmelCase ( self ):
pass
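# Sketch of the sweep pattern behind check_over_configs in the tests above:
# rebuild the scheduler for each value of one config key and re-run the same
# check. `make_scheduler` and `check` are placeholders, not diffusers APIs.
def sweep_config(make_scheduler, key, values, check):
    for value in values:
        check(make_scheduler(**{key: value}))


# e.g. sweep_config(lambda **kw: UnCLIPScheduler(**kw), "clip_sample_range", [1, 5, 10, 20], step_check)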
| 11 | 0 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
lowercase_ = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
lowercase_ = get_tests_dir("fixtures/vocab.json")
lowercase_ = get_tests_dir("fixtures")
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
def __UpperCAmelCase ( self ):
__a = 0
def __UpperCAmelCase ( self ):
__a = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__a = WavaVecaConfig()
__a = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''' )
# save in new folder
model_config.save_pretrained(_a )
processor.save_pretrained(_a )
__a = AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(_a , os.path.join(_a , _a ) )
copyfile(_a , os.path.join(_a , '''vocab.json''' ) )
__a = AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__a = WavaVecaFeatureExtractor()
__a = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
__a = WavaVecaProcessor(_a , _a )
# save in new folder
processor.save_pretrained(_a )
# drop `processor_class` in tokenizer
with open(os.path.join(_a , _a ) , '''r''' ) as f:
__a = json.load(_a )
config_dict.pop('''processor_class''' )
with open(os.path.join(_a , _a ) , '''w''' ) as f:
f.write(json.dumps(_a ) )
__a = AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__a = WavaVecaFeatureExtractor()
__a = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''' )
__a = WavaVecaProcessor(_a , _a )
# save in new folder
processor.save_pretrained(_a )
# drop `processor_class` in feature extractor
with open(os.path.join(_a , _a ) , '''r''' ) as f:
__a = json.load(_a )
config_dict.pop('''processor_class''' )
with open(os.path.join(_a , _a ) , '''w''' ) as f:
f.write(json.dumps(_a ) )
__a = AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
with tempfile.TemporaryDirectory() as tmpdirname:
__a = WavaVecaConfig(processor_class='''Wav2Vec2Processor''' )
model_config.save_pretrained(_a )
# copy relevant files
copyfile(_a , os.path.join(_a , '''vocab.json''' ) )
            # create empty sample processor
with open(os.path.join(_a , _a ) , '''w''' ) as f:
f.write('''{}''' )
__a = AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __UpperCAmelCase ( self ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
__a = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
__a = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
__a = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
__a = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
__a = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''' )
# Test we can also load the slow version
__a = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a , use_fast=_a )
__a = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''' )
else:
self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''' )
def __UpperCAmelCase ( self ):
try:
AutoConfig.register('''custom''' , _a )
AutoFeatureExtractor.register(_a , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
AutoProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoProcessor.register(_a , _a )
# Now that the config is registered, it can be used as any other config with the auto-API
__a = CustomFeatureExtractor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(_a , '''vocab.txt''' )
with open(_a , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__a = CustomTokenizer(_a )
__a = CustomProcessor(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(_a )
__a = AutoProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __UpperCAmelCase ( self ):
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[Any] = False
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = False
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : int = 'AutoFeatureExtractor'
__UpperCAmelCase : Any = 'AutoTokenizer'
__UpperCAmelCase : List[Any] = False
try:
AutoConfig.register('''custom''' , _a )
AutoFeatureExtractor.register(_a , _a )
AutoTokenizer.register(_a , slow_tokenizer_class=_a )
AutoProcessor.register(_a , _a )
# If remote code is not set, the default is to use local classes.
__a = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
# If remote code is disabled, we load the local ones.
__a = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertFalse(processor.special_attribute_present )
self.assertFalse(processor.feature_extractor.special_attribute_present )
self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote code is enabled, we load from the Hub.
__a = AutoProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=_a )
self.assertEqual(processor.__class__.__name__ , '''NewProcessor''' )
self.assertTrue(processor.special_attribute_present )
self.assertTrue(processor.feature_extractor.special_attribute_present )
self.assertTrue(processor.tokenizer.special_attribute_present )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
def __UpperCAmelCase ( self ):
__a = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''' )
def __UpperCAmelCase ( self ):
__a = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''' )
self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''' )
@is_staging_test
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Any = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
@classmethod
def __UpperCAmelCase ( cls ):
__a = TOKEN
HfFolder.save_token(_a )
@classmethod
def __UpperCAmelCase ( cls ):
try:
delete_repo(token=cls._token , repo_id='''test-processor''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='''test-dynamic-processor''' )
except HTTPError:
pass
def __UpperCAmelCase ( self ):
__a = WavaVecaProcessor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_a , '''test-processor''' ) , push_to_hub=_a , use_auth_token=self._token )
__a = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(new_processor.feature_extractor , _a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __UpperCAmelCase ( self ):
__a = WavaVecaProcessor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
processor.save_pretrained(
os.path.join(_a , '''test-processor-org''' ) , push_to_hub=_a , use_auth_token=self._token , organization='''valid_org''' , )
__a = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''' )
for k, v in processor.feature_extractor.__dict__.items():
self.assertEqual(_a , getattr(new_processor.feature_extractor , _a ) )
self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
def __UpperCAmelCase ( self ):
CustomFeatureExtractor.register_for_auto_class()
CustomTokenizer.register_for_auto_class()
CustomProcessor.register_for_auto_class()
__a = CustomFeatureExtractor.from_pretrained(_a )
with tempfile.TemporaryDirectory() as tmp_dir:
__a = os.path.join(_a , '''vocab.txt''' )
with open(_a , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens] ) )
__a = CustomTokenizer(_a )
__a = CustomProcessor(_a , _a )
with tempfile.TemporaryDirectory() as tmp_dir:
create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
__a = Repository(_a , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
processor.save_pretrained(_a )
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''',
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(_a , '''tokenizer_config.json''' ) ) as f:
__a = json.load(_a )
self.assertDictEqual(
tokenizer_config['''auto_map'''] , {
'''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None],
'''AutoProcessor''': '''custom_processing.CustomProcessor''',
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(_a , '''custom_feature_extraction.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_a , '''custom_tokenization.py''' ) ) )
self.assertTrue(os.path.isfile(os.path.join(_a , '''custom_processing.py''' ) ) )
repo.push_to_hub()
__a = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=_a )
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''' )
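# Condensed sketch of the register/cleanup pattern the tests above depend on:
# register a custom config/processor pair with the auto classes, and always
# undo it so one test cannot leak registrations into the next. Note that
# AutoConfig.register expects the config class's model_type to match the
# string being registered. Class names here are placeholders.
from transformers import CONFIG_MAPPING, PROCESSOR_MAPPING, AutoConfig, AutoProcessor


def register_custom_processor(config_cls, processor_cls, model_type="custom"):
    AutoConfig.register(model_type, config_cls)
    AutoProcessor.register(config_cls, processor_cls)


def unregister_custom_processor(config_cls, model_type="custom"):
    if model_type in CONFIG_MAPPING._extra_content:
        del CONFIG_MAPPING._extra_content[model_type]
    if config_cls in PROCESSOR_MAPPING._extra_content:
        del PROCESSOR_MAPPING._extra_content[config_cls]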
| 368 |
"""simple docstring"""
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowercase ( lowerCAmelCase__ : Dict ) -> Optional[int]:
__a , __a = image.size
__a , __a = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
__a = image.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos'''] )
__a = np.array(lowerCAmelCase__ ).astype(np.floataa ) / 2_55.0
__a = image[None].transpose(0 , 3 , 1 , 2 )
__a = torch.from_numpy(lowerCAmelCase__ )
return 2.0 * image - 1.0
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a , _a , _a , ):
super().__init__()
self.register_modules(vqvae=_a , unet=_a , scheduler=_a )
@torch.no_grad()
def __call__( self , _a = None , _a = 1 , _a = 100 , _a = 0.0 , _a = None , _a = "pil" , _a = True , ):
if isinstance(_a , PIL.Image.Image ):
__a = 1
elif isinstance(_a , torch.Tensor ):
__a = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}''' )
if isinstance(_a , PIL.Image.Image ):
__a = preprocess(_a )
__a , __a = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
__a = (batch_size, self.unet.config.in_channels // 2, height, width)
__a = next(self.unet.parameters() ).dtype
__a = randn_tensor(_a , generator=_a , device=self.device , dtype=_a )
__a = image.to(device=self.device , dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a , device=self.device )
__a = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
__a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__a = {}
if accepts_eta:
__a = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
__a = torch.cat([latents, image] , dim=1 )
__a = self.scheduler.scale_model_input(_a , _a )
# predict the noise residual
__a = self.unet(_a , _a ).sample
# compute the previous noisy sample x_t -> x_t-1
__a = self.scheduler.step(_a , _a , _a , **_a ).prev_sample
# decode the image latents with the VQVAE
__a = self.vqvae.decode(_a ).sample
__a = torch.clamp(_a , -1.0 , 1.0 )
__a = image / 2 + 0.5
__a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__a = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
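# Standalone sketch of the preprocess() arithmetic above: snap width/height
# down to a multiple of 32 (the resize itself needs PIL and is omitted), then
# map pixel values from [0, 255] to [-1, 1].
import numpy as np


def snap_to_multiple_of_32(w, h):
    return w - w % 32, h - h % 32


def to_model_range(image_uint8):
    image = image_uint8.astype(np.float32) / 255.0  # now in [0, 1]
    return 2.0 * image - 1.0  # now in [-1, 1]


assert snap_to_multiple_of_32(513, 800) == (512, 800)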
| 11 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
lowercase_ = logging.get_logger("transformers.models.speecht5")
lowercase_ = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
lowercase_ = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
lowercase_ = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
lowercase_ = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
lowercase_ = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
lowercase_ = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
lowercase_ = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
lowercase_ = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
lowercase_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
lowercase_ = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
lowercase_ = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
lowercase_ = []
lowercase_ = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
lowercase_ = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
lowercase_ = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
lowercase_ = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict ) -> Union[str, Any]:
for attribute in key.split('''.''' ):
__a = getattr(lowerCAmelCase__ , lowerCAmelCase__ )
if weight_type is not None:
__a = getattr(lowerCAmelCase__ , lowerCAmelCase__ ).shape
else:
__a = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__a = value
elif weight_type == "weight_g":
__a = value
elif weight_type == "weight_v":
__a = value
elif weight_type == "bias":
__a = value
elif weight_type == "running_mean":
__a = value
elif weight_type == "running_var":
__a = value
elif weight_type == "num_batches_tracked":
__a = value
else:
__a = value
logger.info(f'''{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.''' )
def lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] ) -> Optional[Any]:
for key in ignore_keys:
if key.endswith('''.*''' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__a , __a = key.split('''.*.''' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def lowercase ( lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[Any] ) -> Optional[Any]:
__a = []
if task == "s2t":
__a = hf_model.speechta.encoder.prenet.feature_encoder
__a = MAPPING_S2T
__a = IGNORE_KEYS_S2T
elif task == "t2s":
__a = None
__a = MAPPING_T2S
__a = IGNORE_KEYS_T2S
elif task == "s2s":
__a = hf_model.speechta.encoder.prenet.feature_encoder
__a = MAPPING_S2S
__a = IGNORE_KEYS_S2S
else:
raise ValueError(f'''Unsupported task: {task}''' )
for name, value in fairseq_dict.items():
if should_ignore(lowerCAmelCase__ , lowerCAmelCase__ ):
logger.info(f'''{name} was ignored''' )
continue
__a = False
if "conv_layers" in name:
load_conv_layer(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , hf_model.config.feat_extract_norm == '''group''' , )
__a = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__a , __a = key.split('''.*.''' )
if prefix in name and suffix in name:
__a = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__a = True
if "*" in mapped_key:
__a = name.split(lowerCAmelCase__ )[0].split('''.''' )[-2]
__a = mapped_key.replace('''*''' , lowerCAmelCase__ )
if "weight_g" in name:
__a = '''weight_g'''
elif "weight_v" in name:
__a = '''weight_v'''
elif "bias" in name:
__a = '''bias'''
elif "weight" in name:
__a = '''weight'''
elif "running_mean" in name:
__a = '''running_mean'''
elif "running_var" in name:
__a = '''running_var'''
elif "num_batches_tracked" in name:
__a = '''num_batches_tracked'''
else:
__a = None
set_recursively(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
continue
if not is_used:
unused_weights.append(lowerCAmelCase__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def lowercase ( lowerCAmelCase__ : int , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Union[str, Any] ) -> List[Any]:
__a = full_name.split('''conv_layers.''' )[-1]
__a = name.split('''.''' )
__a = int(items[0] )
__a = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__a = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__a = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__a = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__a = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(lowerCAmelCase__ )
@torch.no_grad()
def lowercase ( lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any]=None , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Dict=None , ) -> Any:
if config_path is not None:
__a = SpeechTaConfig.from_pretrained(lowerCAmelCase__ )
else:
__a = SpeechTaConfig()
if task == "s2t":
__a = config.max_text_positions
__a = SpeechTaForSpeechToText(lowerCAmelCase__ )
elif task == "t2s":
__a = 1876
__a = 600
__a = config.max_speech_positions
__a = SpeechTaForTextToSpeech(lowerCAmelCase__ )
elif task == "s2s":
__a = 1876
__a = config.max_speech_positions
__a = SpeechTaForSpeechToSpeech(lowerCAmelCase__ )
else:
raise ValueError(f'''Unknown task name: {task}''' )
if vocab_path:
__a = SpeechTaTokenizer(lowerCAmelCase__ , model_max_length=config.max_text_positions )
        # Mask token behaves like a normal word, i.e. it includes the space before it
__a = AddedToken('''<mask>''' , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ )
__a = mask_token
tokenizer.add_special_tokens({'''mask_token''': mask_token} )
tokenizer.add_tokens(['''<ctc_blank>'''] )
__a = SpeechTaFeatureExtractor()
__a = SpeechTaProcessor(tokenizer=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ )
processor.save_pretrained(lowerCAmelCase__ )
__a = torch.load(lowerCAmelCase__ )
recursively_load_weights(fairseq_checkpoint['''model'''] , lowerCAmelCase__ , lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
if repo_id:
print('''Pushing to the hub...''' )
processor.push_to_hub(lowerCAmelCase__ )
model.push_to_hub(lowerCAmelCase__ )
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
lowercase_ = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
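# Sketch of the wildcard matching used by should_ignore() above: a trailing
# ".*" matches any key with that prefix, and "a.*.b" matches names containing
# both the prefix and the suffix around one wildcard segment.
def matches_pattern(name, key):
    if key.endswith(".*"):
        return name.startswith(key[:-1])
    if ".*." in key:
        prefix, suffix = key.split(".*.")
        return prefix in name and suffix in name
    return key in name


assert matches_pattern("encoder.proj.weight", "encoder.proj")
assert matches_pattern("encoder.layers.3.norm_k.bias", "encoder.layers.*.norm_k.bias")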
| 369 |
"""simple docstring"""
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Dict = DistilBertTokenizer
__UpperCAmelCase : Any = DistilBertTokenizerFast
__UpperCAmelCase : int = True
@slow
def __UpperCAmelCase ( self ):
__a = DistilBertTokenizer.from_pretrained('''distilbert-base-uncased''' )
__a = tokenizer.encode('''sequence builders''' , add_special_tokens=_a )
__a = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_a )
__a = tokenizer.build_inputs_with_special_tokens(_a )
__a = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
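# The assertions above encode the BERT-style special-token layout. A sketch
# with illustrative ids (101/102 are the usual uncased [CLS]/[SEP], but treat
# them as placeholders): one sequence is [CLS] A [SEP]; a pair appends B [SEP].
CLS, SEP = 101, 102


def build_with_special_tokens(text_a, text_b=None):
    ids = [CLS] + text_a + [SEP]
    if text_b is not None:
        ids += text_b + [SEP]
    return ids


assert build_with_special_tokens([7, 8]) == [CLS, 7, 8, SEP]
assert build_with_special_tokens([7], [9]) == [CLS, 7, SEP, 9, SEP]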
| 11 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : List[str] = 'yolos'
def __init__( self , _a=768 , _a=12 , _a=12 , _a=3_072 , _a="gelu" , _a=0.0 , _a=0.0 , _a=0.02 , _a=1E-12 , _a=[512, 864] , _a=16 , _a=3 , _a=True , _a=100 , _a=True , _a=False , _a=1 , _a=5 , _a=2 , _a=5 , _a=2 , _a=0.1 , **_a , ):
super().__init__(**_a )
__a = hidden_size
__a = num_hidden_layers
__a = num_attention_heads
__a = intermediate_size
__a = hidden_act
__a = hidden_dropout_prob
__a = attention_probs_dropout_prob
__a = initializer_range
__a = layer_norm_eps
__a = image_size
__a = patch_size
__a = num_channels
__a = qkv_bias
__a = num_detection_tokens
__a = use_mid_position_embeddings
__a = auxiliary_loss
# Hungarian matcher
__a = class_cost
__a = bbox_cost
__a = giou_cost
# Loss coefficients
__a = bbox_loss_coefficient
__a = giou_loss_coefficient
__a = eos_coefficient
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = version.parse('1.11' )
@property
def __UpperCAmelCase ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def __UpperCAmelCase ( self ):
return 1E-4
@property
def __UpperCAmelCase ( self ):
return 12
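# Sketch of the sequence length implied by the config defaults above, assuming
# the usual ViT patch embedding plus YOLOS extras: one [CLS] token, one token
# per patch_size x patch_size patch, and num_detection_tokens appended.
def yolos_sequence_length(image_size=(512, 864), patch_size=16, num_detection_tokens=100):
    num_patches = (image_size[0] // patch_size) * (image_size[1] // patch_size)
    return 1 + num_patches + num_detection_tokens


assert yolos_sequence_length() == 1 + 32 * 54 + 100  # 1829 tokens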
| 370 |
"""simple docstring"""
from math import factorial, radians
def lowercase ( lowerCAmelCase__ : float , lowerCAmelCase__ : int = 18 , lowerCAmelCase__ : int = 10 ) -> float:
__a = angle_in_degrees - ((angle_in_degrees // 3_60.0) * 3_60.0)
# Converting from degrees to radians
__a = radians(lowerCAmelCase__ )
__a = angle_in_radians
__a = 3
__a = -1
for _ in range(lowerCAmelCase__ ):
result += (b * (angle_in_radians**a)) / factorial(lowerCAmelCase__ )
__a = -b # One positive term and the next will be negative and so on...
a += 2 # Increased by 2 for every term.
return round(lowerCAmelCase__ , lowerCAmelCase__ )
if __name__ == "__main__":
__import__("doctest").testmod()
| 11 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase_ = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
lowercase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
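# Minimal sketch of the lazy-import mechanism behind _LazyModule above: PEP 562
# lets a module defer heavy imports until an attribute is first accessed. This
# standalone version only shows the idea, not transformers' actual class.
import importlib

_import_structure = {"json": ["dumps"]}  # submodule -> attribute names


def __getattr__(name):
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")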
| 371 |
"""simple docstring"""
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowercase ( lowerCAmelCase__ : dict ) -> tuple:
return (data["data"], data["target"])
def lowercase ( lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray , lowerCAmelCase__ : np.ndarray ) -> np.ndarray:
__a = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(lowerCAmelCase__ , lowerCAmelCase__ )
# Predict target for test data
__a = xgb.predict(lowerCAmelCase__ )
__a = predictions.reshape(len(lowerCAmelCase__ ) , 1 )
return predictions
def lowercase ( ) -> None:
__a = fetch_california_housing()
__a , __a = data_handling(lowerCAmelCase__ )
__a , __a , __a , __a = train_test_split(
lowerCAmelCase__ , lowerCAmelCase__ , test_size=0.25 , random_state=1 )
__a = xgboost(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Error printing
print(f'''Mean Absolute Error : {mean_absolute_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
print(f'''Mean Square Error : {mean_squared_error(lowerCAmelCase__ , lowerCAmelCase__ )}''' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
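# Tiny sanity check for the two error metrics printed above, on hand-made
# arrays so it runs without fetching the housing data or training a model.
import numpy as np
from sklearn.metrics import mean_absolute_error, mean_squared_error

y_true = np.array([3.0, -0.5, 2.0, 7.0])
y_pred = np.array([2.5, 0.0, 2.0, 8.0])
assert mean_absolute_error(y_true, y_pred) == 0.5
assert mean_squared_error(y_true, y_pred) == 0.375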
| 11 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : Optional[int] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : List[Any] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def A_ ( self ):
torch.manual_seed(0 )
_lowerCamelCase : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModel(lowercase )
def A_ ( self ):
_lowerCamelCase : str = self.dummy_uncond_unet
_lowerCamelCase : Tuple = DDIMScheduler()
_lowerCamelCase : Union[str, Any] = self.dummy_vq_model
_lowerCamelCase : Optional[int] = LDMPipeline(unet=lowercase , vqvae=lowercase , scheduler=lowercase )
ldm.to(lowercase )
ldm.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : List[Any] = torch.manual_seed(0 )
_lowerCamelCase : Any = ldm(generator=lowercase , num_inference_steps=2 , output_type='numpy' ).images
_lowerCamelCase : str = torch.manual_seed(0 )
_lowerCamelCase : int = ldm(generator=lowercase , num_inference_steps=2 , output_type='numpy' , return_dict=lowercase )[0]
_lowerCamelCase : Tuple = image[0, -3:, -3:, -1]
_lowerCamelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCamelCase : List[Any] = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72] )
_lowerCamelCase : Dict = 1E-2 if torch_device != 'mps' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : Tuple = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256' )
ldm.to(lowercase )
ldm.set_progress_bar_config(disable=lowercase )
_lowerCamelCase : int = torch.manual_seed(0 )
_lowerCamelCase : Any = ldm(generator=lowercase , num_inference_steps=5 , output_type='numpy' ).images
_lowerCamelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_lowerCamelCase : Optional[Any] = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47] )
_lowerCamelCase : Union[str, Any] = 1E-2 if torch_device != 'mps' else 3E-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 12 |
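# Sketch of the slice-comparison idiom these pipeline tests use: take the
# bottom-right 3x3 corner of the first image's last channel, flatten it, and
# compare against a recorded reference within a tolerance.
import numpy as np


def slices_close(image, expected_slice, tolerance):
    image_slice = image[0, -3:, -3:, -1].flatten()
    return np.abs(image_slice - expected_slice).max() < tolerance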
"""simple docstring"""
# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
lowercase__ = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
lowercase__ = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
lowercase__ = """zero2"""
lowercase__ = """zero3"""
lowercase__ = [ZEROa, ZEROa]
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
# customize the test name generator function as we want both params to appear in the sub-test
# name, as by default it shows only the first param
_lowerCamelCase : List[str] = parameterized.to_safe_name('_'.join(str(lowercase__ ) for x in param.args ) )
return f'''{func.__name__}_{param_based_name}'''
# Cartesian-product of zero stages with models to test
lowercase__ = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
@require_torch_multi_gpu
@parameterized.expand(lowercase , name_func=lowercase )
def A_ ( self , lowercase , lowercase ):
self.run_and_check(
stage=lowercase , model=lowercase , distributed=lowercase , fpaa=lowercase , )
def A_ ( self , lowercase ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = True , lowercase = True , lowercase = True , ):
_lowerCamelCase : List[str] = models[model]
_lowerCamelCase : Optional[int] = self.run_trainer(
stage=lowercase , model_name=lowercase , eval_steps=lowercase , num_train_epochs=1 , distributed=lowercase , fpaa=lowercase , )
self.do_checks(lowercase )
return output_dir
def A_ ( self , lowercase , lowercase , lowercase = 10 , lowercase = 1 , lowercase = True , lowercase = True , ):
_lowerCamelCase : List[str] = self.get_auto_remove_tmp_dir('./xxx' , after=lowercase )
_lowerCamelCase : Any = F'''
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(lowercase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
'''.split()
if fpaa:
args.extend(['--fp16'] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_lowerCamelCase : Optional[int] = F'''--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json'''.split()
_lowerCamelCase : Optional[Any] = [F'''{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py''']
_lowerCamelCase : Dict = self.get_launcher(lowercase )
_lowerCamelCase : Union[str, Any] = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(lowercase , env=self.get_env() )
return output_dir
def A_ ( self , lowercase=False ):
        # 1. explicitly set --num_nodes=1 just in case these tests end up running on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
        # results with more gpus because we use very little data)
_lowerCamelCase : Any = min(2 , get_gpu_count() ) if distributed else 1
        return F'''deepspeed --num_nodes 1 --num_gpus {num_gpus}'''.split()
| 12 | 1 |
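# Sketch of how the command above is assembled: launcher prefix, script path,
# script args, and the --deepspeed config args are plain token lists that get
# concatenated and handed to execute_subprocess_async. Paths are placeholders.
def build_cmd(num_gpus=2, stage="zero2"):
    launcher = f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
    script = ["examples/research_projects/wav2vec2/run_asr.py"]
    args = ["--output_dir", "/tmp/out", "--fp16"]
    ds_args = f"--deepspeed tests/ds_config_wav2vec2_{stage}.json".split()
    return launcher + script + args + ds_args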
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
lowercase__ = logging.getLogger(__name__)
def _snake_case ( lowercase__=2 , lowercase__=3 , lowercase__=16 , lowercase__ = 10 , lowercase__ = 2 ):
def get_dataset(lowercase__ ):
_lowerCamelCase : List[Any] = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(lowercase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
_lowerCamelCase : Optional[int] = get_dataset(lowercase__ )
_lowerCamelCase : int = get_dataset(lowercase__ )
_lowerCamelCase : Optional[int] = DataLoader(lowercase__ , shuffle=lowercase__ , batch_size=lowercase__ , num_workers=4 )
_lowerCamelCase : List[str] = DataLoader(lowercase__ , shuffle=lowercase__ , batch_size=lowercase__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__=None ):
_lowerCamelCase : List[str] = []
for epoch in range(lowercase__ ):
# Train quickly
model.train()
for batch in dataloader:
_lowerCamelCase, _lowerCamelCase : Dict = batch
_lowerCamelCase : Optional[Any] = model(lowercase__ )
_lowerCamelCase : List[Any] = torch.nn.functional.mse_loss(lowercase__ , lowercase__ )
accelerator.backward(lowercase__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
def __init__( self ):
super().__init__()
_lowerCamelCase : Union[str, Any] = nn.Parameter(torch.randn(1 ) )
_lowerCamelCase : str = nn.Parameter(torch.randn(1 ) )
def A_ ( self , lowercase ):
return x * self.a + self.b
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCamelCase : Dict = DummyModel()
_lowerCamelCase : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCamelCase, _lowerCamelCase : int = dummy_dataloaders()
_lowerCamelCase : Union[str, Any] = ProjectConfiguration(total_limit=1 , project_dir=lowercase , automatic_checkpoint_naming=lowercase )
# Train baseline
_lowerCamelCase : int = Accelerator(project_config=lowercase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def A_ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCamelCase : Union[str, Any] = DummyModel()
_lowerCamelCase : Dict = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCamelCase, _lowerCamelCase : Dict = dummy_dataloaders()
# Train baseline
_lowerCamelCase : List[str] = Accelerator()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase )
# Save initial
_lowerCamelCase : str = os.path.join(lowercase , 'initial' )
accelerator.save_state(lowercase )
((_lowerCamelCase), (_lowerCamelCase)) : Union[str, Any] = model.a.item(), model.b.item()
_lowerCamelCase : Optional[int] = optimizer.state_dict()
_lowerCamelCase : Optional[int] = train(3 , lowercase , lowercase , lowercase , lowercase )
((_lowerCamelCase), (_lowerCamelCase)) : str = model.a.item(), model.b.item()
_lowerCamelCase : Tuple = optimizer.state_dict()
# Train partially
set_seed(42 )
_lowerCamelCase : int = DummyModel()
_lowerCamelCase : Any = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCamelCase, _lowerCamelCase : Any = dummy_dataloaders()
_lowerCamelCase : Dict = Accelerator()
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Optional[int] = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase )
accelerator.load_state(lowercase )
((_lowerCamelCase), (_lowerCamelCase)) : Optional[Any] = model.a.item(), model.b.item()
_lowerCamelCase : List[str] = optimizer.state_dict()
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
_lowerCamelCase : str = train(2 , lowercase , lowercase , lowercase , lowercase )
# Save everything
_lowerCamelCase : int = os.path.join(lowercase , 'checkpoint' )
accelerator.save_state(lowercase )
# Load everything back in and make sure all states work
accelerator.load_state(lowercase )
test_rands += train(1 , lowercase , lowercase , lowercase , lowercase )
((_lowerCamelCase), (_lowerCamelCase)) : str = model.a.item(), model.b.item()
_lowerCamelCase : List[Any] = optimizer.state_dict()
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
def A_ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCamelCase : Optional[int] = DummyModel()
_lowerCamelCase : Tuple = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCamelCase, _lowerCamelCase : List[str] = dummy_dataloaders()
_lowerCamelCase : Optional[int] = ProjectConfiguration(automatic_checkpoint_naming=lowercase )
# Train baseline
_lowerCamelCase : str = Accelerator(project_dir=lowercase , project_config=lowercase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Dict = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase )
# Save initial
accelerator.save_state()
((_lowerCamelCase), (_lowerCamelCase)) : str = model.a.item(), model.b.item()
_lowerCamelCase : Tuple = optimizer.state_dict()
_lowerCamelCase : Optional[Any] = train(3 , lowercase , lowercase , lowercase , lowercase )
((_lowerCamelCase), (_lowerCamelCase)) : Optional[Any] = model.a.item(), model.b.item()
_lowerCamelCase : List[str] = optimizer.state_dict()
# Train partially
set_seed(42 )
_lowerCamelCase : str = DummyModel()
_lowerCamelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCamelCase, _lowerCamelCase : List[Any] = dummy_dataloaders()
_lowerCamelCase : Any = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowercase )
_lowerCamelCase : Tuple = Accelerator(project_dir=lowercase , project_config=lowercase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Union[str, Any] = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase )
accelerator.load_state(os.path.join(lowercase , 'checkpoints' , 'checkpoint_0' ) )
((_lowerCamelCase), (_lowerCamelCase)) : Tuple = model.a.item(), model.b.item()
_lowerCamelCase : int = optimizer.state_dict()
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
_lowerCamelCase : List[str] = train(2 , lowercase , lowercase , lowercase , lowercase )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowercase , 'checkpoints' , 'checkpoint_1' ) )
test_rands += train(1 , lowercase , lowercase , lowercase , lowercase )
((_lowerCamelCase), (_lowerCamelCase)) : str = model.a.item(), model.b.item()
_lowerCamelCase : Optional[Any] = optimizer.state_dict()
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
self.assertEqual(lowercase , lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = torch.tensor([1, 2, 3] )
_lowerCamelCase : Tuple = torch.tensor([2, 3, 4] )
_lowerCamelCase : Optional[Any] = DummyModel()
_lowerCamelCase : Optional[int] = torch.optim.Adam(net.parameters() )
_lowerCamelCase : Optional[int] = Accelerator()
with self.assertRaises(lowercase ) as ve:
accelerator.register_for_checkpointing(lowercase , lowercase , lowercase , lowercase )
_lowerCamelCase : Optional[int] = str(ve.exception )
self.assertTrue('Item at index 0' in message )
self.assertTrue('Item at index 1' in message )
self.assertFalse('Item at index 2' in message )
self.assertFalse('Item at index 3' in message )
def A_ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCamelCase : Tuple = DummyModel()
_lowerCamelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
_lowerCamelCase : str = torch.optim.lr_scheduler.StepLR(lowercase , step_size=1 , gamma=0.99 )
_lowerCamelCase, _lowerCamelCase : int = dummy_dataloaders()
_lowerCamelCase : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=lowercase )
# Train baseline
_lowerCamelCase : Dict = Accelerator(project_dir=lowercase , project_config=lowercase )
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase : Tuple = accelerator.prepare(
lowercase , lowercase , lowercase , lowercase , lowercase )
# Save initial
accelerator.save_state()
_lowerCamelCase : Optional[Any] = scheduler.state_dict()
train(3 , lowercase , lowercase , lowercase , lowercase , lowercase )
self.assertNotEqual(lowercase , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowercase , 'checkpoints' , 'checkpoint_0' ) )
self.assertEqual(lowercase , scheduler.state_dict() )
def A_ ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
_lowerCamelCase : str = DummyModel()
_lowerCamelCase : Optional[Any] = ProjectConfiguration(automatic_checkpoint_naming=lowercase , total_limit=2 )
# Train baseline
_lowerCamelCase : Optional[int] = Accelerator(project_dir=lowercase , project_config=lowercase )
_lowerCamelCase : int = accelerator.prepare(lowercase )
            # Save 11 states; with total_limit=2 only checkpoint_9 and checkpoint_10 should survive:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowercase , 'checkpoints' , 'checkpoint_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , 'checkpoints' , 'checkpoint_9' ) ) )
self.assertTrue(os.path.exists(os.path.join(lowercase , 'checkpoints' , 'checkpoint_10' ) ) )
@require_cuda
def A_ ( self ):
_lowerCamelCase : Dict = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(lowercase , env=os.environ.copy() )
if __name__ == "__main__":
lowercase__ = """/tmp/accelerate/state_checkpointing"""
lowercase__ = DummyModel()
lowercase__ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
lowercase__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
lowercase__ , lowercase__ = dummy_dataloaders()
lowercase__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
lowercase__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="""no""")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
lowercase__ , lowercase__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
lowercase__ = group["""params"""][0].device
break
assert param_device.type == accelerator.device.type
lowercase__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""cpu""")
for group in optimizer.param_groups:
lowercase__ = group["""params"""][0].device
break
assert (
param_device.type == torch.device("""cpu""").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""on_device""")
for group in optimizer.param_groups:
lowercase__ = group["""params"""][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="""Unsupported optimizer map location passed"""):
accelerator.load_state(os.path.join(savedir, """checkpoints""", """checkpoint_0"""), map_location="""invalid""")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone() | 12 |
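All of the tests above repeat one pattern: prepare the objects, snapshot state, mutate, then restore. A minimal sketch of that round trip in isolation (the directory name is arbitrary):

import torch
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
model, optimizer = accelerator.prepare(model, optimizer)

accelerator.save_state("/tmp/ckpt")  # snapshots model, optimizer, and RNG states
# ... train for a while, changing parameters and optimizer state ...
accelerator.load_state("/tmp/ckpt")  # everything rolls back to the snapshot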
"""simple docstring"""
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = 8 , **lowercase , ):
super().__init__(**lowercase )
_lowerCamelCase : Optional[Any] = do_rescale
_lowerCamelCase : Union[str, Any] = rescale_factor
_lowerCamelCase : Any = do_pad
_lowerCamelCase : Optional[int] = pad_size
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None ):
_lowerCamelCase, _lowerCamelCase : Tuple = get_image_size(lowercase )
_lowerCamelCase : Union[str, Any] = (old_height // size + 1) * size - old_height
_lowerCamelCase : Tuple = (old_width // size + 1) * size - old_width
return pad(lowercase , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=lowercase )
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
_lowerCamelCase : List[str] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Any = do_pad if do_pad is not None else self.do_pad
_lowerCamelCase : int = pad_size if pad_size is not None else self.pad_size
_lowerCamelCase : Dict = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_lowerCamelCase : Dict = [to_numpy_array(lowercase ) for image in images]
if do_rescale:
_lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_pad:
_lowerCamelCase : str = [self.pad(lowercase , size=lowercase ) for image in images]
_lowerCamelCase : Any = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
_lowerCamelCase : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 12 | 1 |
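The pad step grows each spatial dimension to the next multiple of size via (old // size + 1) * size - old; note that this formula always adds between 1 and size pixels, even when a dimension is already an exact multiple. A small numeric sketch of the same arithmetic:

import numpy as np

size = 8
image = np.zeros((3, 20, 21))  # (channels, height, width), arbitrary shapes
old_height, old_width = image.shape[1], image.shape[2]
pad_height = (old_height // size + 1) * size - old_height  # 20 -> 4 extra rows
pad_width = (old_width // size + 1) * size - old_width     # 21 -> 3 extra cols
padded = np.pad(image, ((0, 0), (0, pad_height), (0, pad_width)), mode="symmetric")
print(padded.shape)  # (3, 24, 24)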
"""simple docstring"""
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowercase__ = logging.get_logger(__name__)
@add_end_docstrings(lowercase )
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
super().__init__(*lowercase , **lowercase )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def A_ ( self , lowercase=None ):
_lowerCamelCase : Optional[int] = {}
if top_k is not None:
_lowerCamelCase : Union[str, Any] = top_k
return {}, {}, postprocess_params
def __call__( self , lowercase , **lowercase ):
return super().__call__(lowercase , **lowercase )
def A_ ( self , lowercase ):
_lowerCamelCase : Optional[int] = load_image(lowercase )
_lowerCamelCase : int = self.image_processor(images=lowercase , return_tensors=self.framework )
return model_inputs
def A_ ( self , lowercase ):
_lowerCamelCase : str = self.model(**lowercase )
return model_outputs
def A_ ( self , lowercase , lowercase=5 ):
if top_k > self.model.config.num_labels:
_lowerCamelCase : Union[str, Any] = self.model.config.num_labels
if self.framework == "pt":
_lowerCamelCase : Any = model_outputs.logits.softmax(-1 )[0]
_lowerCamelCase, _lowerCamelCase : int = probs.topk(lowercase )
elif self.framework == "tf":
_lowerCamelCase : List[Any] = stable_softmax(model_outputs.logits , axis=-1 )[0]
_lowerCamelCase : int = tf.math.top_k(lowercase , k=lowercase )
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F'''Unsupported framework: {self.framework}''' )
_lowerCamelCase : Any = scores.tolist()
_lowerCamelCase : Optional[int] = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(lowercase , lowercase )] | 12 |
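In practice this class is reached through the pipeline factory rather than instantiated directly; a short usage sketch (the image path is a placeholder, and omitting the model ID falls back to the task's default checkpoint):

from transformers import pipeline

classifier = pipeline("image-classification")
preds = classifier("path/to/image.jpg", top_k=3)
# e.g. [{"score": 0.93, "label": "tabby"}, ...] -- scores from the softmax above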
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"""tab""": ord("""\t"""),
"""newline""": ord("""\r"""),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
KEYMAP["""arrow_begin"""] = KEYMAP["""up"""]
KEYMAP["""arrow_end"""] = KEYMAP["""left"""]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def _snake_case ( ):
if os.name == "nt":
import msvcrt
_lowerCamelCase : Any = 'mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(lowercase__ ) == 0:
# Read the keystroke
_lowerCamelCase : str = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_lowerCamelCase : List[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_lowerCamelCase : Union[str, Any] = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int'] ) )
WIN_CH_BUFFER.append(lowercase__ )
if ord(lowercase__ ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
_lowerCamelCase : List[Any] = chr(KEYMAP['esc'] )
except KeyError:
_lowerCamelCase : int = cha[1]
else:
_lowerCamelCase : Optional[int] = ch.decode(lowercase__ )
else:
_lowerCamelCase : Union[str, Any] = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
_lowerCamelCase : List[str] = sys.stdin.fileno()
_lowerCamelCase : Tuple = termios.tcgetattr(lowercase__ )
try:
tty.setraw(lowercase__ )
_lowerCamelCase : Optional[Any] = sys.stdin.read(1 )
finally:
termios.tcsetattr(lowercase__ , termios.TCSADRAIN , lowercase__ )
return ch
def _snake_case ( ):
_lowerCamelCase : int = get_raw_chars()
if ord(lowercase__ ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(lowercase__ ) == KEYMAP["esc"]:
_lowerCamelCase : Union[str, Any] = get_raw_chars()
if ord(lowercase__ ) == KEYMAP["mod_int"]:
_lowerCamelCase : List[Any] = get_raw_chars()
if ord(lowercase__ ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(lowercase__ ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(lowercase__ ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"] | 12 | 1 |
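The arrow-key handling hinges on ARROW_KEY_FLAG = 1 << 8: the raw escape-sequence bytes (65-68) are lifted above the single-byte range so they can never collide with ordinary characters. A short sketch of the flag arithmetic:

ARROW_KEY_FLAG = 1 << 8  # 256

raw_code = 65                        # byte the terminal sends for "up" ('A')
flagged = raw_code + ARROW_KEY_FLAG  # 321, the value stored in KEYMAP["up"]

assert flagged - ARROW_KEY_FLAG == raw_code  # get_character strips the flag back off
assert flagged > 255                         # cannot be confused with a plain character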
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""pixel_values"""]
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = True , lowercase = 1 / 255 , lowercase = None , lowercase = True , lowercase = None , lowercase = None , **lowercase , ):
super().__init__(**lowercase )
_lowerCamelCase : Any = size if size is not None else {'height': 224, 'width': 224}
_lowerCamelCase : List[str] = get_size_dict(lowercase )
_lowerCamelCase : int = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowerCamelCase : List[Any] = get_size_dict(lowercase , default_to_square=lowercase , param_name='crop_size' )
_lowerCamelCase : List[Any] = do_resize
_lowerCamelCase : str = do_rescale
_lowerCamelCase : Optional[Any] = do_normalize
_lowerCamelCase : Optional[int] = do_center_crop
_lowerCamelCase : Union[str, Any] = crop_size
_lowerCamelCase : Any = size
_lowerCamelCase : Union[str, Any] = resample
_lowerCamelCase : List[str] = rescale_factor
_lowerCamelCase : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_lowerCamelCase : Tuple = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def A_ ( self , lowercase , lowercase , lowercase = PILImageResampling.BILINEAR , lowercase = None , **lowercase , ):
_lowerCamelCase : Optional[Any] = get_size_dict(lowercase )
if "shortest_edge" in size:
_lowerCamelCase : List[str] = get_resize_output_image_size(lowercase , size=size['shortest_edge'] , default_to_square=lowercase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_lowerCamelCase : Optional[Any] = (size['height'], size['width'])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase , ):
_lowerCamelCase : int = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowercase , size=(size['height'], size['width']) , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase = None , **lowercase ):
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ):
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def A_ ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ):
_lowerCamelCase : List[Any] = do_resize if do_resize is not None else self.do_resize
_lowerCamelCase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
_lowerCamelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize
_lowerCamelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCamelCase : List[str] = crop_size if crop_size is not None else self.crop_size
_lowerCamelCase : Tuple = get_size_dict(lowercase , param_name='crop_size' , default_to_square=lowercase )
_lowerCamelCase : Any = resample if resample is not None else self.resample
_lowerCamelCase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCamelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
_lowerCamelCase : Dict = image_std if image_std is not None else self.image_std
_lowerCamelCase : Dict = size if size is not None else self.size
_lowerCamelCase : List[str] = get_size_dict(lowercase )
if not is_batched(lowercase ):
_lowerCamelCase : List[Any] = [images]
if not valid_images(lowercase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_lowerCamelCase : Union[str, Any] = [to_numpy_array(lowercase ) for image in images]
if do_resize:
_lowerCamelCase : int = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
_lowerCamelCase : Any = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
_lowerCamelCase : str = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
_lowerCamelCase : List[Any] = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
_lowerCamelCase : List[str] = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
_lowerCamelCase : List[Any] = {'pixel_values': images}
return BatchFeature(data=lowercase , tensor_type=lowercase ) | 12 |
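The preprocessing order above (resize, center-crop, rescale, normalize, channel reordering) is the usual ImageNet-style pipeline. A sketch of just the rescale and normalize math on a raw uint8 array, using the ImageNet default statistics named in the imports:

import numpy as np

mean = np.array([0.485, 0.456, 0.406])  # IMAGENET_DEFAULT_MEAN
std = np.array([0.229, 0.224, 0.225])   # IMAGENET_DEFAULT_STD

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
x = image * (1 / 255)     # rescale to [0, 1]
x = (x - mean) / std      # per-channel normalization
x = x.transpose(2, 0, 1)  # HWC -> CHW, i.e. ChannelDimension.FIRST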
"""simple docstring"""
from typing import Any
def mode(input_list: list ) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value ) for value in input_list]
    y = max(result )  # Gets the maximum count in the input list.
    # Gets the values of the modes
    return sorted({input_list[i] for i, value in enumerate(result ) if value == y} )
if __name__ == "__main__":
import doctest
doctest.testmod() | 12 | 1 |
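A quick usage sketch of the mode function above (inputs chosen arbitrarily):

print(mode([2, 3, 4, 5, 3, 4]))     # [3, 4] -- both values appear twice
print(mode([3, 4, 5, 3, 4, 2, 3]))  # [3]
print(mode([]))                     # []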
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_0000
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def map(dataset , **kwargs ):
    _ = dataset.map(**kwargs )
@get_duration
def filter(dataset , **kwargs ):
    _ = dataset.filter(**kwargs )
def benchmark_map_filter( ):
_lowerCamelCase : int = {'num examples': SPEED_TEST_N_EXAMPLES}
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCamelCase : Tuple = datasets.Features({'text': datasets.Value('string' ), 'numbers': datasets.Value('float32' )} )
_lowerCamelCase : Tuple = generate_example_dataset(
os.path.join(lowercase__ , 'dataset.arrow' ) , lowercase__ , num_examples=lowercase__ )
_lowerCamelCase : Optional[int] = transformers.AutoTokenizer.from_pretrained('bert-base-cased' , use_fast=lowercase__ )
def tokenize(lowercase__ ):
return tokenizer(examples['text'] )
_lowerCamelCase : Dict = map(lowercase__ )
_lowerCamelCase : Union[str, Any] = map(lowercase__ , batched=lowercase__ )
_lowerCamelCase : Dict = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
with dataset.formatted_as(type='numpy' ):
_lowerCamelCase : Optional[Any] = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
with dataset.formatted_as(type='pandas' ):
_lowerCamelCase : str = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
with dataset.formatted_as(type='torch' , columns='numbers' ):
_lowerCamelCase : Tuple = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
with dataset.formatted_as(type='tensorflow' , columns='numbers' ):
_lowerCamelCase : Optional[int] = map(lowercase__ , function=lambda lowercase__ : None , batched=lowercase__ )
_lowerCamelCase : Tuple = map(lowercase__ , function=lowercase__ , batched=lowercase__ )
_lowerCamelCase : Optional[Any] = filter(lowercase__ )
        # Activate later when the tokenizer supports batched inputs
# with dataset.formatted_as(type='numpy'):
# times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
with open(lowercase__ , 'wb' ) as f:
f.write(json.dumps(lowercase__ ).encode('utf-8' ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter() | 12 |
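get_duration comes from a local utils module that is not shown here. A plausible minimal implementation, offered only as an assumption about its behavior (time the wrapped call and return the elapsed seconds, which the benchmark then stores in its results dict):

import functools
import time

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        return time.time() - start  # elapsed seconds
    return wrapper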
"""simple docstring"""
def counting_sort(collection ):
    # if the collection is empty, return it as-is
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection )
    coll_max = max(collection )
    coll_min = min(collection )
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1 , counting_arr_length ):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to beginning, updating counting_arr
    for i in reversed(range(0 , coll_len ) ):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered
def counting_sort_string(string ):
    return "".join([chr(i ) for i in counting_sort([ord(c ) for c in string] )] )
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("""thisisthestring""") == "eghhiiinrsssttt"
lowercase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase__ = [int(item) for item in user_input.split(""",""")]
print(counting_sort(unsorted)) | 12 | 1 |
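A worked trace of the prefix-sum step: for [0, 2, 1, 2] the per-value counts are [1, 1, 2]; after the cumulative sum they become [1, 2, 4], meaning one element <= 0, two elements <= 1, four elements <= 2 -- exactly the 1-based final slot of the last occurrence of each value:

collection = [0, 2, 1, 2]
counting_arr = [1, 1, 2]  # occurrences of 0, 1, 2
# after prefix sums: [1, 2, 4] -> value 0 ends at slot 1, value 1 at slot 2, value 2 at slot 4
assert counting_sort(collection) == [0, 1, 2, 2]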
"""simple docstring"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(R"""digital_image_processing/image_data/lena_small.jpg""")
gray = cvtColor(img, COLOR_BGR2GRAY)
def _snake_case ( ):
_lowerCamelCase : List[str] = cn.convert_to_negative(lowercase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def _snake_case ( ):
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(lowercase__ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def _snake_case ( ):
_lowerCamelCase : int = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def _snake_case ( ):
_lowerCamelCase : int = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
_lowerCamelCase : List[Any] = canny.canny(lowercase__ )
# assert canny array for at least one True
assert canny_array.any()
def _snake_case ( ):
assert gg.gaussian_filter(lowercase__ , 5 , sigma=0.9 ).all()
def _snake_case ( ):
# laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
    res = conv.img_convolve(gray , laplace ).astype(uint8 )
assert res.any()
def _snake_case ( ):
assert med.median_filter(lowercase__ , 3 ).any()
def _snake_case ( ):
_lowerCamelCase, _lowerCamelCase : List[Any] = sob.sobel_filter(lowercase__ )
assert grad.any() and theta.any()
def _snake_case ( ):
_lowerCamelCase : Dict = sp.make_sepia(lowercase__ , 20 )
assert sepia.all()
def _snake_case ( lowercase__ = "digital_image_processing/image_data/lena_small.jpg" ):
_lowerCamelCase : Any = bs.Burkes(imread(lowercase__ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def _snake_case ( lowercase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
_lowerCamelCase : Optional[Any] = rs.NearestNeighbour(imread(lowercase__ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def _snake_case ( ):
_lowerCamelCase : str = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
_lowerCamelCase : List[str] = imread(lowercase__ , 0 )
# Test for get_neighbors_pixel function() return not None
_lowerCamelCase : Union[str, Any] = 0
_lowerCamelCase : int = 0
_lowerCamelCase : Union[str, Any] = image[x_coordinate][y_coordinate]
_lowerCamelCase : Optional[int] = lbp.get_neighbors_pixel(
lowercase__ , lowercase__ , lowercase__ , lowercase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
_lowerCamelCase : Dict = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
_lowerCamelCase : Optional[Any] = lbp.local_binary_value(lowercase__ , lowercase__ , lowercase__ )
assert lbp_image.any() | 12 |
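The last test exercises lbp.local_binary_value, which compares a pixel's 8 neighbours against the centre and packs the comparisons into one byte. A minimal sketch of that idea for an interior pixel (this is not the project's exact implementation; the neighbour ordering is an assumption):

def local_binary_value_sketch(image, x, y):
    center = image[x][y]
    # clockwise neighbour offsets starting top-left (assumed ordering)
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        if image[x + dx][y + dy] >= center:
            value += 1 << bit  # set this bit when the neighbour is at least as bright
    return value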
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert."""
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"""--original_config_file""",
default=None,
type=str,
help="""The YAML config file corresponding to the original architecture.""",
)
parser.add_argument(
"""--num_in_channels""",
default=None,
type=int,
help="""The number of input channels. If `None` number of input channels will be automatically inferred.""",
)
parser.add_argument(
"""--scheduler_type""",
default="""pndm""",
type=str,
help="""Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']""",
)
parser.add_argument(
"""--pipeline_type""",
default=None,
type=str,
help=(
"""The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"""
""". If `None` pipeline will be automatically inferred."""
),
)
parser.add_argument(
"""--image_size""",
default=None,
type=int,
        help=(
            """The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"""
            """ Base. Use 768 for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--prediction_type""",
default=None,
type=str,
help=(
"""The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"""
""" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."""
),
)
parser.add_argument(
"""--extract_ema""",
action="""store_true""",
help=(
"""Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"""
""" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"""
""" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."""
),
)
parser.add_argument(
"""--upcast_attention""",
action="""store_true""",
help=(
"""Whether the attention computation should always be upcasted. This is necessary when running stable"""
""" diffusion 2.1."""
),
)
parser.add_argument(
"""--from_safetensors""",
action="""store_true""",
help="""If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.""",
)
parser.add_argument(
"""--to_safetensors""",
action="""store_true""",
help="""Whether to store pipeline in safetensors format or not.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--device""", type=str, help="""Device to use (e.g. cpu, cuda:0, cuda:1, etc.)""")
parser.add_argument(
"""--stable_unclip""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.""",
)
parser.add_argument(
"""--stable_unclip_prior""",
type=str,
default=None,
required=False,
help="""Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.""",
)
parser.add_argument(
"""--clip_stats_path""",
type=str,
help="""Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.""",
required=False,
)
parser.add_argument(
"""--controlnet""", action="""store_true""", default=None, help="""Set flag if this is a controlnet checkpoint."""
)
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--vae_path""",
type=str,
default=None,
required=False,
help="""Set to a path, hub id to an already converted vae to not convert it again.""",
)
lowercase__ = parser.parse_args()
lowercase__ = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 12 | 1 |
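An example invocation of the script above; the script file name and all paths here are hypothetical:

# python convert_original_stable_diffusion_to_diffusers.py \
#     --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
#     --original_config_file ./v1-inference.yaml \
#     --scheduler_type pndm \
#     --extract_ema --half \
#     --dump_path ./stable-diffusion-v1-5-diffusers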
"""simple docstring"""
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union
import numpy as np
from ...utils import is_tf_available, is_torch_available, logging
if TYPE_CHECKING:
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer
lowercase__ = logging.get_logger(__name__)
lowercase__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowercase__ = {
"""vocab_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json""",
},
"""merges_file""": {
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""Salesforce/codegen-350M-mono""": (
"""https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"""
),
},
}
lowercase__ = {
"""Salesforce/codegen-350M-mono""": 2048,
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase__ = ["""input_ids""", """attention_mask"""]
lowerCamelCase__ = CodeGenTokenizer
def __init__( self , lowercase=None , lowercase=None , lowercase=None , lowercase="<|endoftext|>" , lowercase="<|endoftext|>" , lowercase="<|endoftext|>" , lowercase=False , **lowercase , ):
super().__init__(
lowercase , lowercase , tokenizer_file=lowercase , unk_token=lowercase , bos_token=lowercase , eos_token=lowercase , add_prefix_space=lowercase , **lowercase , )
if kwargs.pop('add_bos_token' , lowercase ):
            model_id = kwargs.pop('name_or_path' , '' )
            raise ValueError(
                'Currently GPT2\'s fast tokenizer does NOT support adding a BOS token. '
'Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n'
F'''`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n'''
F'''`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n'''
'This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.'
' so that the fast tokenizer works correctly.' )
_lowerCamelCase : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowercase ) != add_prefix_space:
_lowerCamelCase : Dict = getattr(lowercase , pre_tok_state.pop('type' ) )
_lowerCamelCase : Any = add_prefix_space
_lowerCamelCase : List[Any] = pre_tok_class(**lowercase )
_lowerCamelCase : str = add_prefix_space
def A_ ( self , *lowercase , **lowercase ):
_lowerCamelCase : Any = kwargs.get('is_split_into_words' , lowercase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowercase , **lowercase )
def A_ ( self , *lowercase , **lowercase ):
_lowerCamelCase : List[str] = kwargs.get('is_split_into_words' , lowercase )
assert self.add_prefix_space or not is_split_into_words, (
F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowercase , **lowercase )
def A_ ( self , lowercase , lowercase = None ):
_lowerCamelCase : Optional[Any] = self._tokenizer.model.save(lowercase , name=lowercase )
return tuple(lowercase )
def A_ ( self , lowercase , lowercase = False , lowercase = None , lowercase = None , **lowercase , ):
_lowerCamelCase : List[Any] = super().decode(
token_ids=lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase , **lowercase , )
if truncate_before_pattern is not None and len(lowercase ) > 0:
_lowerCamelCase : Dict = self.truncate(lowercase , lowercase )
return decoded_text
def A_ ( self , lowercase , lowercase ):
def find_re(lowercase , lowercase , lowercase ):
_lowerCamelCase : List[str] = pattern.search(lowercase , lowercase )
return m.start() if m else -1
_lowerCamelCase : Dict = [re.compile(lowercase , re.MULTILINE ) for pattern in truncate_before_pattern]
_lowerCamelCase : Tuple = list(re.finditer('^print' , lowercase , re.MULTILINE ) )
if len(lowercase ) > 1:
_lowerCamelCase : Dict = completion[: prints[1].start()]
_lowerCamelCase : Dict = list(re.finditer('^def' , lowercase , re.MULTILINE ) )
if len(lowercase ) > 1:
_lowerCamelCase : Tuple = completion[: defs[1].start()]
_lowerCamelCase : List[str] = 0
_lowerCamelCase : List[Any] = [
pos for pos in [find_re(lowercase , lowercase , lowercase ) for terminal in terminals] if pos != -1
]
if len(lowercase ) > 0:
return completion[: min(lowercase )]
else:
return completion | 12 |
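truncate cuts a generated completion at the second top-level print or def, or at the earliest match of any caller-supplied regex. A self-contained sketch of the same idea (the sample completion and pattern are illustrative only):

import re

completion = "def add(a, b):\n    return a + b\n\n\ndef sub(a, b):\n    return a - b\n"
pattern = re.compile(r"^def", re.MULTILINE)
match = pattern.search(completion, 1)  # skip the def at position 0
truncated = completion[: match.start()] if match else completion
print(truncated)  # keeps only the first function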
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = (UnCLIPScheduler,)
def A_ ( self , **lowercase ):
_lowerCamelCase : Any = {
'num_train_timesteps': 1000,
'variance_type': 'fixed_small_log',
'clip_sample': True,
'clip_sample_range': 1.0,
'prediction_type': 'epsilon',
}
config.update(**lowercase )
return config
def A_ ( self ):
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=lowercase )
def A_ ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=lowercase )
def A_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=lowercase )
def A_ ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=lowercase )
def A_ ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=lowercase )
def A_ ( self ):
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=lowercase , prev_timestep=lowercase )
def A_ ( self ):
_lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[int] = self.get_scheduler_config(variance_type='fixed_small_log' )
_lowerCamelCase : str = scheduler_class(**lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_54_96_25 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9_99_49_87 ) ) < 1E-5
def A_ ( self ):
_lowerCamelCase : List[str] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config(variance_type='learned_range' )
_lowerCamelCase : int = scheduler_class(**lowercase )
_lowerCamelCase : List[str] = 0.5
assert scheduler._get_variance(1 , predicted_variance=lowercase ) - -10.1_71_27_90 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=lowercase ) - -5.7_99_80_52 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=lowercase ) - -0.0_01_00_11 < 1E-5
def A_ ( self ):
_lowerCamelCase : List[Any] = self.scheduler_classes[0]
_lowerCamelCase : Optional[Any] = self.get_scheduler_config()
_lowerCamelCase : Tuple = scheduler_class(**lowercase )
_lowerCamelCase : Union[str, Any] = scheduler.timesteps
_lowerCamelCase : Any = self.dummy_model()
_lowerCamelCase : Optional[Any] = self.dummy_sample_deter
_lowerCamelCase : Optional[int] = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : Tuple = model(lowercase , lowercase )
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : List[Any] = scheduler.step(lowercase , lowercase , lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : Optional[int] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[Any] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_52.2_68_24_95 ) < 1E-2
assert abs(result_mean.item() - 0.3_28_47_43 ) < 1E-3
def A_ ( self ):
_lowerCamelCase : Tuple = self.scheduler_classes[0]
_lowerCamelCase : str = self.get_scheduler_config()
_lowerCamelCase : Optional[Any] = scheduler_class(**lowercase )
scheduler.set_timesteps(25 )
_lowerCamelCase : Optional[Any] = scheduler.timesteps
_lowerCamelCase : Optional[int] = self.dummy_model()
_lowerCamelCase : Any = self.dummy_sample_deter
_lowerCamelCase : str = torch.manual_seed(0 )
for i, t in enumerate(lowercase ):
# 1. predict noise residual
_lowerCamelCase : List[Any] = model(lowercase , lowercase )
if i + 1 == timesteps.shape[0]:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : List[str] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_lowerCamelCase : Union[str, Any] = scheduler.step(
lowercase , lowercase , lowercase , prev_timestep=lowercase , generator=lowercase ).prev_sample
_lowerCamelCase : List[Any] = pred_prev_sample
_lowerCamelCase : Optional[Any] = torch.sum(torch.abs(lowercase ) )
_lowerCamelCase : List[str] = torch.mean(torch.abs(lowercase ) )
assert abs(result_sum.item() - 2_58.2_04_49_83 ) < 1E-2
assert abs(result_mean.item() - 0.3_36_20_38 ) < 1E-3
def A_ ( self ):
pass
def A_ ( self ):
pass | 12 | 1 |
"""simple docstring"""
from math import factorial
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
if successes > trials:
raise ValueError('successes must be lower or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(lowercase__ , lowercase__ ) or not isinstance(lowercase__ , lowercase__ ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
raise ValueError('prob has to be in range of 1 - 0' )
_lowerCamelCase : List[str] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
_lowerCamelCase : str = float(factorial(lowercase__ ) )
coefficient /= factorial(lowercase__ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75)) | 12 |
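The printed example can be checked by hand: C(4, 2) = 6 and 0.75**2 * 0.25**2 = 0.03515625, giving 6 * 0.03515625 = 0.2109375. The same computation with math.comb:

from math import comb

p = comb(4, 2) * 0.75**2 * 0.25**2  # P(X = 2) for Binomial(n=4, p=0.75)
print(p)  # 0.2109375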
"""simple docstring"""
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """data2vec-audio"""
def __init__( self , lowercase=32 , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.1 , lowercase=0.0 , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=1E-5 , lowercase="gelu" , lowercase=(512, 512, 512, 512, 512, 512, 512) , lowercase=(5, 2, 2, 2, 2, 2, 2) , lowercase=(10, 3, 3, 3, 3, 2, 2) , lowercase=False , lowercase=16 , lowercase=19 , lowercase=5 , lowercase=0.05 , lowercase=10 , lowercase=2 , lowercase=0.0 , lowercase=10 , lowercase=0 , lowercase="sum" , lowercase=False , lowercase=False , lowercase=256 , lowercase=(512, 512, 512, 512, 1500) , lowercase=(5, 3, 3, 1, 1) , lowercase=(1, 2, 3, 1, 1) , lowercase=512 , lowercase=0 , lowercase=1 , lowercase=2 , lowercase=False , lowercase=3 , lowercase=2 , lowercase=3 , lowercase=None , **lowercase , ):
super().__init__(**lowercase , pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase )
_lowerCamelCase : str = hidden_size
_lowerCamelCase : str = feat_extract_activation
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Dict = list(lowercase )
_lowerCamelCase : Optional[Any] = conv_bias
_lowerCamelCase : Union[str, Any] = num_conv_pos_embeddings
_lowerCamelCase : List[Any] = num_conv_pos_embedding_groups
_lowerCamelCase : List[Any] = conv_pos_kernel_size
_lowerCamelCase : Optional[int] = len(self.conv_dim )
_lowerCamelCase : List[str] = num_hidden_layers
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : List[str] = hidden_act
_lowerCamelCase : Tuple = num_attention_heads
_lowerCamelCase : Any = hidden_dropout
_lowerCamelCase : Union[str, Any] = attention_dropout
_lowerCamelCase : str = activation_dropout
_lowerCamelCase : Any = feat_proj_dropout
_lowerCamelCase : Tuple = final_dropout
_lowerCamelCase : Union[str, Any] = layerdrop
_lowerCamelCase : List[Any] = layer_norm_eps
_lowerCamelCase : Optional[Any] = initializer_range
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : Tuple = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_lowerCamelCase : Optional[Any] = mask_time_prob
_lowerCamelCase : List[Any] = mask_time_length
_lowerCamelCase : List[Any] = mask_time_min_masks
_lowerCamelCase : Tuple = mask_feature_prob
_lowerCamelCase : Optional[Any] = mask_feature_length
_lowerCamelCase : Dict = mask_feature_min_masks
# ctc loss
_lowerCamelCase : Tuple = ctc_loss_reduction
_lowerCamelCase : str = ctc_zero_infinity
# adapter
_lowerCamelCase : Union[str, Any] = add_adapter
_lowerCamelCase : List[Any] = adapter_kernel_size
_lowerCamelCase : Optional[Any] = adapter_stride
_lowerCamelCase : List[Any] = num_adapter_layers
_lowerCamelCase : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_lowerCamelCase : Optional[int] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_lowerCamelCase : List[str] = list(lowercase )
_lowerCamelCase : Optional[Any] = list(lowercase )
_lowerCamelCase : Any = list(lowercase )
_lowerCamelCase : Optional[Any] = xvector_output_dim
@property
def A_ ( self ):
return math.prod(self.conv_stride ) | 12 | 1 |
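The closing property multiplies the convolutional strides into the feature extractor's overall downsampling factor; with the default strides above that is 5 * 2**6 = 320, i.e. one feature frame per 320 waveform samples (20 ms at 16 kHz):

import math

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # defaults from the config above
print(math.prod(conv_stride))  # 320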
"""simple docstring"""
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset( IterableDataset ):
    '''simple docstring'''
    def __init__( self , tokenizer , dataset , seq_length=1024 , num_of_sequences=1024 , chars_per_token=3.6 ):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__( self ):
        iterator = iter(self.dataset )
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator )['content'] )
                    buffer_len += len(buffer[-1] )
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer , truncation=False )['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id] )
            for i in range(0 , len(all_token_ids ) , self.seq_length ):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids ) == self.seq_length:
                    yield torch.tensor(input_ids )
def create_dataloader(args ):
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name , split='train' , **ds_kwargs )
    valid_dataset = ConstantLengthDataset(tokenizer , valid_data , seq_length=args.seq_length )
    eval_dataloader = DataLoader(valid_dataset , batch_size=args.batch_size )
    return eval_dataloader
def evaluate(args ):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader ):
        with torch.no_grad():
            outputs = model(batch , labels=batch )
        loss = outputs.loss.repeat(args.batch_size )
        losses.append(accelerator.gather(loss ) )
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses ) )
    try:
        perplexity = torch.exp(loss )
    except OverflowError:
        perplexity = float('inf' )
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("""Evaluating and saving model after training""")
eval_loss, perplexity = evaluate(args)
logger.info(F"loss/eval: {eval_loss}, perplexity: {perplexity}") | 12 |
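The evaluation loop reports perplexity as the exponential of the mean token-level loss, with an overflow guard. The core relationship is simply:

import torch

losses = torch.tensor([2.1, 2.3, 2.0])  # made-up per-batch cross-entropy losses
perplexity = torch.exp(losses.mean())   # exp(2.1333...) ~= 8.44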
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
lowercase__ = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """facebook/nllb-200-distilled-600M"""
lowerCamelCase__ = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
lowerCamelCase__ = """translator"""
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = LANGUAGE_CODES
lowerCamelCase__ = ["""text""", """text""", """text"""]
lowerCamelCase__ = ["""text"""]
def A_ ( self , lowercase , lowercase , lowercase ):
if src_lang not in self.lang_to_code:
raise ValueError(F'''{src_lang} is not a supported language.''' )
if tgt_lang not in self.lang_to_code:
raise ValueError(F'''{tgt_lang} is not a supported language.''' )
_lowerCamelCase : str = self.lang_to_code[src_lang]
_lowerCamelCase : int = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
lowercase , return_tensors='pt' , src_lang=lowercase , tgt_lang=lowercase )
def A_ ( self , lowercase ):
return self.model.generate(**lowercase )
def A_ ( self , lowercase ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=lowercase ) | 12 | 1 |
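# A minimal, runnable sketch of the language-name validation done in the translation
# tool's encode step above: plain-English names must be keys of the LANGUAGE_CODES
# table and are mapped to NLLB's FLORES-200 codes before tokenization. The excerpt
# below reuses three real entries from the table above.
_CODES_EXCERPT = {"English": "eng_Latn", "Romanian": "ron_Latn", "French": "fra_Latn"}

def to_nllb_code(lang):
    if lang not in _CODES_EXCERPT:
        raise ValueError(f"{lang} is not a supported language.")
    return _CODES_EXCERPT[lang]

print(to_nllb_code("Romanian"))  # -> ron_Latn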
"""simple docstring"""
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
# Initialise PyTorch model
_lowerCamelCase : Optional[Any] = RemBertConfig.from_json_file(lowercase__ )
print('Building PyTorch model from configuration: {}'.format(str(lowercase__ ) ) )
_lowerCamelCase : List[str] = RemBertModel(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print('Save PyTorch model to {}'.format(lowercase__ ) )
torch.save(model.state_dict() , lowercase__ )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowercase__ = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path) | 12 |
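# Hypothetical invocation of the conversion script above (all paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/model.ckpt-best \
#       --rembert_config_file /path/to/rembert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin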
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def A_ ( self , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = hf_hub_download(
repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
_lowerCamelCase : Tuple = VideoClassificationPipeline(model=lowercase , image_processor=lowercase , top_k=2 )
_lowerCamelCase : List[str] = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def A_ ( self , lowercase , lowercase ):
for example in examples:
_lowerCamelCase : Tuple = video_classifier(lowercase )
self.assertEqual(
lowercase , [
{'score': ANY(lowercase ), 'label': ANY(lowercase )},
{'score': ANY(lowercase ), 'label': ANY(lowercase )},
] , )
@require_torch
def A_ ( self ):
_lowerCamelCase : Optional[Any] = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
_lowerCamelCase : Tuple = VideoMAEFeatureExtractor(
size={'shortest_edge': 10} , crop_size={'height': 10, 'width': 10} )
_lowerCamelCase : Dict = pipeline(
'video-classification' , model=lowercase , feature_extractor=lowercase , frame_sampling_rate=4 )
_lowerCamelCase : Any = hf_hub_download(repo_id='nateraw/video-demo' , filename='archery.mp4' , repo_type='dataset' )
_lowerCamelCase : Dict = video_classifier(lowercase , top_k=2 )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}] , )
_lowerCamelCase : str = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(lowercase , decimals=4 ) , [
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
[{'score': 0.51_99, 'label': 'LABEL_0'}, {'score': 0.48_01, 'label': 'LABEL_1'}],
] , )
@require_tf
def A_ ( self ):
pass | 12 | 1 |
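# A hedged usage sketch of the pipeline exercised by the tests above (the tiny
# checkpoint is a test fixture with random weights, so the labels are meaningless):
# from transformers import pipeline
# clf = pipeline(
#     "video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
# )
# clf("archery.mp4", top_k=2)  # -> [{"score": ..., "label": ...}, {"score": ..., "label": ...}]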
"""simple docstring"""
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCAmelCase__ ( lowercase, lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , lowercase , lowercase = None , lowercase = None ):
super().__init__()
_lowerCamelCase : Dict = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
_lowerCamelCase : int = torch.zeros(lowercase , lowercase )
else:
_lowerCamelCase : Tuple = None
_lowerCamelCase : List[str] = torch.nn.Parameter(lowercase )
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
def __init__( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , ):
super().__init__()
self.register_modules(
vqvae=lowercase , transformer=lowercase , text_encoder=lowercase , tokenizer=lowercase , scheduler=lowercase , learned_classifier_free_sampling_embeddings=lowercase , )
def A_ ( self , lowercase , lowercase , lowercase ):
_lowerCamelCase : Tuple = len(lowercase ) if isinstance(lowercase , lowercase ) else 1
# get prompt text embeddings
_lowerCamelCase : Tuple = self.tokenizer(
lowercase , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
_lowerCamelCase : Optional[Any] = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_lowerCamelCase : Tuple = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
_lowerCamelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length]
_lowerCamelCase : List[str] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
_lowerCamelCase : Optional[int] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=lowercase )
# duplicate text embeddings for each generation per prompt
_lowerCamelCase : Optional[int] = prompt_embeds.repeat_interleave(lowercase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
_lowerCamelCase : Optional[Any] = self.learned_classifier_free_sampling_embeddings.embeddings
_lowerCamelCase : Optional[int] = negative_prompt_embeds.unsqueeze(0 ).repeat(lowercase , 1 , 1 )
else:
_lowerCamelCase : Optional[int] = [''] * batch_size
_lowerCamelCase : Optional[int] = text_input_ids.shape[-1]
_lowerCamelCase : List[Any] = self.tokenizer(
lowercase , padding='max_length' , max_length=lowercase , truncation=lowercase , return_tensors='pt' , )
_lowerCamelCase : Dict = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
_lowerCamelCase : Optional[int] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=lowercase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_lowerCamelCase : List[str] = negative_prompt_embeds.shape[1]
_lowerCamelCase : int = negative_prompt_embeds.repeat(1 , lowercase , 1 )
_lowerCamelCase : Any = negative_prompt_embeds.view(batch_size * num_images_per_prompt , lowercase , -1 )
            # For classifier-free guidance we conceptually need two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # so that one forward pass covers both.
_lowerCamelCase : str = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self , lowercase , lowercase = 100 , lowercase = 5.0 , lowercase = 1.0 , lowercase = 1 , lowercase = None , lowercase = None , lowercase = "pil" , lowercase = True , lowercase = None , lowercase = 1 , ):
if isinstance(lowercase , lowercase ):
_lowerCamelCase : str = 1
elif isinstance(lowercase , lowercase ):
_lowerCamelCase : List[Any] = len(lowercase )
else:
raise ValueError(F'''`prompt` has to be of type `str` or `list` but is {type(lowercase )}''' )
_lowerCamelCase : str = batch_size * num_images_per_prompt
_lowerCamelCase : List[Any] = guidance_scale > 1.0
_lowerCamelCase : List[str] = self._encode_prompt(lowercase , lowercase , lowercase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase , lowercase ) or callback_steps <= 0)
):
raise ValueError(
F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
F''' {type(lowercase )}.''' )
        # get the initial completely masked latents unless the user supplied them
_lowerCamelCase : Tuple = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
_lowerCamelCase : Tuple = self.transformer.num_vector_embeds - 1
_lowerCamelCase : List[Any] = torch.full(lowercase , lowercase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
                    'Unexpected latents value(s). All latents must be valid embedding indices, i.e. in the range 0 to'
F''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
_lowerCamelCase : List[str] = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(lowercase , device=self.device )
_lowerCamelCase : int = self.scheduler.timesteps.to(self.device )
_lowerCamelCase : Tuple = latents
for i, t in enumerate(self.progress_bar(lowercase ) ):
# expand the sample if we are doing classifier free guidance
_lowerCamelCase : Optional[Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
_lowerCamelCase : str = self.transformer(lowercase , encoder_hidden_states=lowercase , timestep=lowercase ).sample
if do_classifier_free_guidance:
_lowerCamelCase, _lowerCamelCase : str = model_output.chunk(2 )
_lowerCamelCase : List[Any] = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(lowercase , dim=1 , keepdim=lowercase )
_lowerCamelCase : Dict = self.truncate(lowercase , lowercase )
# remove `log(0)`'s (`-inf`s)
_lowerCamelCase : Optional[int] = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Any = self.scheduler.step(lowercase , timestep=lowercase , sample=lowercase , generator=lowercase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase , lowercase , lowercase )
_lowerCamelCase : Optional[int] = self.vqvae.config.vq_embed_dim
_lowerCamelCase : Dict = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
_lowerCamelCase : Optional[int] = self.vqvae.quantize.get_codebook_entry(lowercase , shape=lowercase )
_lowerCamelCase : int = self.vqvae.decode(lowercase , force_not_quantize=lowercase ).sample
_lowerCamelCase : Any = (image / 2 + 0.5).clamp(0 , 1 )
_lowerCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_lowerCamelCase : Union[str, Any] = self.numpy_to_pil(lowercase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowercase )
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase, _lowerCamelCase : int = torch.sort(lowercase , 1 , descending=lowercase )
_lowerCamelCase : int = torch.exp(lowercase )
_lowerCamelCase : int = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
_lowerCamelCase : List[str] = torch.full_like(keep_mask[:, 0:1, :] , lowercase )
_lowerCamelCase : Dict = torch.cat((all_true, keep_mask) , dim=1 )
_lowerCamelCase : List[Any] = keep_mask[:, :-1, :]
_lowerCamelCase : str = keep_mask.gather(1 , indices.argsort(1 ) )
_lowerCamelCase : List[str] = log_p_x_0.clone()
_lowerCamelCase : List[str] = -torch.inf # -inf = log(0)
return rv | 12 |
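# A small, self-contained sketch (synthetic probabilities, simplified to the last
# dimension) of the truncation implemented by truncate() above: sort token
# probabilities descending, keep tokens while the cumulative mass stays below
# truncation_rate (always keeping the most likely token), and send the rest to
# log(0) = -inf.
import torch

log_p = torch.tensor([[0.5, 0.3, 0.15, 0.05]]).log()  # synthetic distribution
truncation_rate = 0.7

sorted_log_p, indices = torch.sort(log_p, dim=-1, descending=True)
keep = sorted_log_p.exp().cumsum(dim=-1) < truncation_rate
keep = torch.cat([torch.ones_like(keep[..., :1]), keep[..., :-1]], dim=-1)  # shift: top-1 survives
keep = keep.gather(-1, indices.argsort(-1))  # back to original token order
print(log_p.masked_fill(~keep, float("-inf")).exp())  # tensor([[0.5000, 0.3000, 0.0000, 0.0000]])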
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase__ = {
"""configuration_mega""": ["""MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MegaConfig""", """MegaOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
"""MEGA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MegaForCausalLM""",
"""MegaForMaskedLM""",
"""MegaForMultipleChoice""",
"""MegaForQuestionAnswering""",
"""MegaForSequenceClassification""",
"""MegaForTokenClassification""",
"""MegaModel""",
"""MegaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 12 | 1 |
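# A minimal sketch of the lazy-import pattern the init above relies on (names here
# are illustrative; transformers implements this in its _LazyModule helper): heavy
# submodules are imported only when one of their exported names is first accessed.
import importlib


class LazyModuleSketch:
    def __init__(self, package, import_structure):
        self._package = package
        # invert {submodule: [exported names]} into {name: submodule}
        self._name_to_module = {n: m for m, names in import_structure.items() for n in names}

    def __getattr__(self, name):
        module = importlib.import_module(f".{self._name_to_module[name]}", self._package)
        return getattr(module, name)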
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = ["""image_processor""", """tokenizer"""]
lowerCamelCase__ = """BlipImageProcessor"""
lowerCamelCase__ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , lowercase , lowercase ):
_lowerCamelCase : Tuple = False
super().__init__(lowercase , lowercase )
_lowerCamelCase : Tuple = self.image_processor
def __call__( self , lowercase = None , lowercase = None , lowercase = True , lowercase = False , lowercase = None , lowercase = None , lowercase = 0 , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = False , lowercase = True , lowercase = None , **lowercase , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
_lowerCamelCase : List[Any] = self.tokenizer
_lowerCamelCase : List[str] = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
return text_encoding
# add pixel_values
_lowerCamelCase : Optional[int] = self.image_processor(lowercase , return_tensors=lowercase )
if text is not None:
_lowerCamelCase : Optional[int] = self.tokenizer(
text=lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , stride=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , return_overflowing_tokens=lowercase , return_special_tokens_mask=lowercase , return_offsets_mapping=lowercase , return_token_type_ids=lowercase , return_length=lowercase , verbose=lowercase , return_tensors=lowercase , **lowercase , )
else:
_lowerCamelCase : str = None
if text_encoding is not None:
encoding_image_processor.update(lowercase )
return encoding_image_processor
def A_ ( self , *lowercase , **lowercase ):
return self.tokenizer.batch_decode(*lowercase , **lowercase )
def A_ ( self , *lowercase , **lowercase ):
return self.tokenizer.decode(*lowercase , **lowercase )
@property
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = self.tokenizer.model_input_names
_lowerCamelCase : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 12 |
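# Hedged usage sketch of the processor above (the checkpoint name is illustrative):
# from transformers import BlipProcessor
# processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
# batch = processor(images=image, text="a photography of", return_tensors="pt")
# `batch` combines pixel_values from the image processor with input_ids and
# attention_mask from the tokenizer, matching the __call__ branches above.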
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def _snake_case ( lowercase__ , lowercase__ , lowercase__=None , lowercase__=None ):
if attention_mask is None:
_lowerCamelCase : List[str] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowerCAmelCase__ :
'''simple docstring'''
lowerCamelCase__ = OPTConfig
lowerCamelCase__ = {}
lowerCamelCase__ = """gelu"""
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=False , lowercase=99 , lowercase=16 , lowercase=2 , lowercase=4 , lowercase=4 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=20 , lowercase=2 , lowercase=1 , lowercase=0 , lowercase=16 , lowercase=16 , ):
_lowerCamelCase : Tuple = parent
_lowerCamelCase : Any = batch_size
_lowerCamelCase : Tuple = seq_length
_lowerCamelCase : str = is_training
_lowerCamelCase : Optional[int] = use_labels
_lowerCamelCase : List[Any] = vocab_size
_lowerCamelCase : Dict = hidden_size
_lowerCamelCase : str = num_hidden_layers
_lowerCamelCase : Optional[int] = num_attention_heads
_lowerCamelCase : Any = intermediate_size
_lowerCamelCase : Dict = hidden_act
_lowerCamelCase : Any = hidden_dropout_prob
_lowerCamelCase : List[str] = attention_probs_dropout_prob
_lowerCamelCase : Optional[Any] = max_position_embeddings
_lowerCamelCase : List[Any] = eos_token_id
_lowerCamelCase : Tuple = pad_token_id
_lowerCamelCase : List[str] = bos_token_id
_lowerCamelCase : Optional[int] = embed_dim
_lowerCamelCase : List[str] = word_embed_proj_dim
_lowerCamelCase : Any = False
def A_ ( self ):
_lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_lowerCamelCase : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCamelCase : str = tf.concat([input_ids, eos_tensor] , axis=1 )
_lowerCamelCase : Tuple = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase , **self.config_updates , )
_lowerCamelCase : int = prepare_opt_inputs_dict(lowercase , lowercase )
return config, inputs_dict
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : Optional[Any] = TFOPTModel(config=lowercase )
_lowerCamelCase : Optional[Any] = inputs_dict['input_ids']
_lowerCamelCase : str = input_ids[:1, :]
_lowerCamelCase : Dict = inputs_dict['attention_mask'][:1, :]
_lowerCamelCase : Optional[Any] = 1
# first forward pass
_lowerCamelCase : Any = model(lowercase , attention_mask=lowercase , use_cache=lowercase )
_lowerCamelCase, _lowerCamelCase : List[str] = outputs.to_tuple()
        # create hypothetical next tokens and extend next_input_ids
_lowerCamelCase : Optional[Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
_lowerCamelCase : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append the new tokens to next_input_ids and next_attention_mask
_lowerCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
_lowerCamelCase : Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_lowerCamelCase : Optional[Any] = model(lowercase , attention_mask=lowercase )[0]
_lowerCamelCase : List[str] = model(lowercase , attention_mask=lowercase , past_key_values=lowercase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_lowerCamelCase : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_lowerCamelCase : Optional[int] = output_from_no_past[:, -3:, random_slice_idx]
_lowerCamelCase : List[str] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowercase , lowercase , rtol=1E-3 )
@require_tf
class lowerCAmelCase__ ( lowercase, lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowerCamelCase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowerCamelCase__ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = 10
def A_ ( self ):
_lowerCamelCase : int = TFOPTModelTester(self )
_lowerCamelCase : Tuple = ConfigTester(self , config_class=lowercase )
def A_ ( self ):
self.config_tester.run_common_tests()
def A_ ( self ):
_lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowercase )
def A_ ( self ):
_lowerCamelCase, _lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowercase , lowercase ):
if hasattr(lowercase , 'weight' ):
return embedding_layer.weight
else:
                # Here we build the word embedding weights if they don't exist,
                # then retry fetching the attribute once built.
model.build()
if hasattr(lowercase , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_lowerCamelCase : Optional[int] = model_class(config=lowercase )
_lowerCamelCase : int = _get_word_embedding_weight(lowercase , model.get_input_embeddings() )
_lowerCamelCase : Tuple = _get_word_embedding_weight(lowercase , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowercase )
_lowerCamelCase : str = _get_word_embedding_weight(lowercase , model.get_input_embeddings() )
_lowerCamelCase : Any = _get_word_embedding_weight(lowercase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_lowerCamelCase : Union[str, Any] = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowercase )
# check that weights remain the same after resizing
_lowerCamelCase : int = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowerCamelCase : Optional[Any] = False
self.assertTrue(lowercase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowercase )
_lowerCamelCase : Dict = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_lowerCamelCase : Union[str, Any] = False
self.assertTrue(lowercase )
def _snake_case ( lowercase__ ):
return tf.constant(lowercase__ , dtype=tf.intaa )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = 99
def A_ ( self ):
_lowerCamelCase : Tuple = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_lowerCamelCase : Tuple = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_lowerCamelCase : int = input_ids.shape[0]
_lowerCamelCase : List[Any] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A_ ( self ):
_lowerCamelCase : Tuple = TFOPTModel.from_pretrained('facebook/opt-350m' )
_lowerCamelCase : List[Any] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_lowerCamelCase : List[str] = tf.not_equal(lowercase , model.config.pad_token_id )
with tf.GradientTape():
_lowerCamelCase : List[str] = model(input_ids=lowercase , attention_mask=lowercase ).last_hidden_state
_lowerCamelCase : Optional[Any] = (1, 11, 512)
self.assertEqual(output.shape , lowercase )
_lowerCamelCase : List[str] = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-3 ) )
_lowerCamelCase : List[str] = tf.function(lowercase , jit_compile=lowercase )
_lowerCamelCase : Union[str, Any] = xla_generate(lowercase , lowercase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowercase , atol=4E-2 ) )
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
super().setUp()
_lowerCamelCase : List[Any] = 'facebook/opt-350m'
def A_ ( self ):
_lowerCamelCase : int = TFOPTForCausalLM.from_pretrained(self.path_model )
_lowerCamelCase : List[Any] = GPTaTokenizer.from_pretrained(self.path_model )
_lowerCamelCase : List[str] = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' , padding=lowercase , add_special_tokens=lowercase )
_lowerCamelCase : Optional[int] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_lowerCamelCase : Any = tf.constant(
[
[1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) )
_lowerCamelCase : Tuple = tf.function(lowercase , jit_compile=lowercase )
_lowerCamelCase : List[Any] = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowercase , lowercase , atol=1E-4 ) )
@require_tf
@slow
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def A_ ( self ):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def A_ ( self ):
_lowerCamelCase : str = 'facebook/opt-125m'
_lowerCamelCase : Dict = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Optional[int] = GPTaTokenizer.from_pretrained(lowercase )
_lowerCamelCase : Dict = TFOPTForCausalLM.from_pretrained(lowercase )
for prompt in self.prompts:
_lowerCamelCase : int = tokenizer(lowercase , return_tensors='tf' ).input_ids
_lowerCamelCase : int = model.generate(lowercase , max_length=10 )
_lowerCamelCase : Any = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
predicted_outputs += generated_string
self.assertListEqual(lowercase , lowercase )
def A_ ( self ):
_lowerCamelCase : List[Any] = 'facebook/opt-350m'
_lowerCamelCase : int = GPTaTokenizer.from_pretrained(lowercase )
_lowerCamelCase : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase )
_lowerCamelCase : Any = 'left'
# use different length sentences to test batching
_lowerCamelCase : Optional[int] = [
'Hello, my dog is a little',
'Today, I',
]
_lowerCamelCase : Dict = tokenizer(lowercase , return_tensors='tf' , padding=lowercase )
_lowerCamelCase : int = inputs['input_ids']
_lowerCamelCase : Tuple = model.generate(input_ids=lowercase , attention_mask=inputs['attention_mask'] )
_lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
_lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase )
_lowerCamelCase : Dict = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
_lowerCamelCase : int = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
_lowerCamelCase : Union[str, Any] = model.generate(input_ids=lowercase , max_length=model.config.max_length - num_paddings )
_lowerCamelCase : List[Any] = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
_lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase )
_lowerCamelCase : Optional[Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase )
_lowerCamelCase : Optional[Any] = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(lowercase , lowercase )
self.assertListEqual(lowercase , [non_padded_sentence, padded_sentence] )
def A_ ( self ):
_lowerCamelCase : Tuple = 'facebook/opt-350m'
_lowerCamelCase : List[Any] = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_lowerCamelCase : Optional[int] = []
_lowerCamelCase : Optional[Any] = GPTaTokenizer.from_pretrained(lowercase )
_lowerCamelCase : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase )
for prompt in self.prompts:
_lowerCamelCase : List[Any] = tokenizer(lowercase , return_tensors='tf' ).input_ids
_lowerCamelCase : Optional[Any] = model.generate(lowercase , max_length=10 )
_lowerCamelCase : Dict = tokenizer.batch_decode(lowercase , skip_special_tokens=lowercase )
predicted_outputs += generated_string
self.assertListEqual(lowercase , lowercase ) | 12 | 1 |
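# A brief sketch of the batched-generation setup the padding test above checks:
# decoder-only models such as OPT must be padded on the left so that generation
# continues from a real token at the end of every row.
# tokenizer.padding_side = "left"
# inputs = tokenizer(sentences, return_tensors="tf", padding=True)
# model.generate(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])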
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , ):
_lowerCamelCase : Optional[int] = size if size is not None else {'height': 18, 'width': 18}
_lowerCamelCase : str = parent
_lowerCamelCase : Optional[Any] = batch_size
_lowerCamelCase : Optional[int] = num_channels
_lowerCamelCase : int = image_size
_lowerCamelCase : int = min_resolution
_lowerCamelCase : Optional[int] = max_resolution
_lowerCamelCase : Union[str, Any] = do_resize
_lowerCamelCase : Dict = size
_lowerCamelCase : int = apply_ocr
def A_ ( self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class lowerCAmelCase__ ( lowercase, unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def A_ ( self ):
_lowerCamelCase : List[Any] = LayoutLMvaImageProcessingTester(self )
@property
def A_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self ):
_lowerCamelCase : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , 'do_resize' ) )
self.assertTrue(hasattr(lowercase , 'size' ) )
self.assertTrue(hasattr(lowercase , 'apply_ocr' ) )
def A_ ( self ):
_lowerCamelCase : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
_lowerCamelCase : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def A_ ( self ):
pass
def A_ ( self ):
# Initialize image_processing
_lowerCamelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
_lowerCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , lowercase )
self.assertIsInstance(encoding.boxes , lowercase )
# Test batched
_lowerCamelCase : List[Any] = image_processing(lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def A_ ( self ):
# Initialize image_processing
_lowerCamelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
_lowerCamelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_lowerCamelCase : List[Any] = image_processing(lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def A_ ( self ):
# Initialize image_processing
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
_lowerCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
_lowerCamelCase : Dict = image_processing(lowercase , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def A_ ( self ):
# with apply_OCR = True
_lowerCamelCase : Optional[Any] = LayoutLMvaImageProcessor()
from datasets import load_dataset
_lowerCamelCase : Tuple = load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
_lowerCamelCase : Tuple = Image.open(ds[0]['file'] ).convert('RGB' )
_lowerCamelCase : Dict = image_processing(lowercase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_lowerCamelCase : Tuple = [['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
_lowerCamelCase : List[str] = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 
562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowercase )
self.assertListEqual(encoding.boxes , lowercase )
# with apply_OCR = False
_lowerCamelCase : Tuple = LayoutLMvaImageProcessor(apply_ocr=lowercase )
_lowerCamelCase : Dict = image_processing(lowercase , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) ) | 12 |
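# Hedged usage note for the image processor above: with apply_ocr=True (the
# default) it runs Tesseract and returns recognized words plus boxes normalized to
# a 0-1000 coordinate grid alongside pixel_values; with apply_ocr=False only
# pixel_values come back, as the final block above exercises.
# processor = LayoutLMvaImageProcessor()        # apply_ocr defaults to True
# enc = processor(image, return_tensors="pt")   # enc.words, enc.boxes, enc.pixel_values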
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """philschmid/bart-large-cnn-samsum"""
lowerCamelCase__ = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
lowerCamelCase__ = """summarizer"""
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = ["""text"""]
lowerCamelCase__ = ["""text"""]
def A_ ( self , lowercase ):
return self.pre_processor(lowercase , return_tensors='pt' , truncation=lowercase )
def A_ ( self , lowercase ):
return self.model.generate(**lowercase )[0]
def A_ ( self , lowercase ):
return self.pre_processor.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) | 12 | 1 |
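# Hedged sketch of the three-step PipelineTool flow the summarizer above implements
# (its method names are obfuscated in this dump; conceptually they are
# encode -> forward -> decode):
# inputs = pre_processor(text, return_tensors="pt", truncation=True)   # encode
# output_ids = model.generate(**inputs)[0]                             # forward
# summary = pre_processor.decode(
#     output_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
# )                                                                    # decode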
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
warnings.warn(
'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use SegformerImageProcessor instead.' , lowercase , )
super().__init__(*lowercase , **lowercase ) | 12 |
"""simple docstring"""
from __future__ import annotations
def _snake_case ( lowercase__ , lowercase__ , lowercase__ ):
_lowerCamelCase : Tuple = list(range(len(lowercase__ ) ) )
_lowerCamelCase : Any = [v / w for v, w in zip(lowercase__ , lowercase__ )]
index.sort(key=lambda lowercase__ : ratio[i] , reverse=lowercase__ )
_lowerCamelCase : float = 0
_lowerCamelCase : list[float] = [0] * len(lowercase__ )
for i in index:
if weight[i] <= capacity:
_lowerCamelCase : int = 1
max_value += value[i]
capacity -= weight[i]
else:
_lowerCamelCase : Any = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 12 | 1 |
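# A self-contained, runnable restatement of the greedy routine above (the dump's
# obfuscated assignments drop the `index`, `ratio`, and `fractions` targets, so
# this sketch restores the intended logic under that assumption). Note that the
# doctest.testmod() call above finds no doctests as written.
def fractional_knapsack(value, weight, capacity):
    # visit items in decreasing value/weight ratio; take whole items while they
    # fit, then a fraction of the first item that does not.
    index = sorted(range(len(value)), key=lambda i: value[i] / weight[i], reverse=True)
    max_value, fractions = 0.0, [0.0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1.0
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * fractions[i]
            break
    return max_value, fractions

print(fractional_knapsack([10, 40, 30], [5, 4, 6], 5))
# -> (45.0, [0.0, 1.0, 0.16666666666666666])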
"""simple docstring"""
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
lowercase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowercase__ = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width( height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
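# Illustrative note (added): with the default scale factor of 8, a 768x768 request
# maps to a 96x96 latent grid, since 768 // 8**2 = 12 and 12 * 8 = 96.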
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
    def __init__( self , unet , scheduler , movq , ):
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
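    # Note (added): the scaling by scheduler.init_noise_sigma above matches the
    # noise level the scheduler expects at the first denoising step.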
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        device = torch.device(F'''cuda:{gpu_id}''' )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        device = torch.device(F'''cuda:{gpu_id}''' )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
            hasattr(module , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , negative_image_embeds , hint , height = 512 , width = 512 , num_inference_steps = 100 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if isinstance(hint , list ):
            hint = torch.cat(hint , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            hint = hint.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
            hint = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height, width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'image_embeds': image_embeds, 'hint': hint}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2 )
                _, variance_pred_text = variance_pred.chunk(2 )
                # classifier-free guidance: push the prediction away from the unconditional branch
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image ) | 12 |
"""simple docstring"""
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("""""", """|""", """|"""),
datarow=DataRow("""""", """|""", """|"""),
padding=1,
with_header_hide=None,
)
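# Assumption (added comment): this bare TableFormat draws pipe-separated rows with
# no rule lines, so the tabulated reports paste cleanly into Slack code blocks.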
failed = []
group_info = []
no_error_payload = {"""type""": """section""", """text""": {"""type""": """plain_text""", """text""": """No failed tests! 🤗""", """emoji""": True}}
payload = [
{
"""type""": """header""",
"""text""": {
"""type""": """plain_text""",
"""text""": F"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
"""emoji""": True,
},
}
]
total_num_failed = 0
for log in Path().glob("""*.log"""):
    section_num_failed = 0
with open(log, """r""") as f:
for line in f:
            line = json.loads(line)
if line.get("""nodeid""", """""") != "":
                test = line["""nodeid"""]
if line.get("""duration""", None) is not None:
lowercase__ = F"{line['duration']:.4f}"
if line.get("""outcome""", """""") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("""_""")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
lowercase__ = """"""
lowercase__ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += F"*{name[1:]}: {num_failed} failed test*\n"
else:
message += F"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
                data = test[0].split("""::""")
                data[0] = data[0].split("""/""")[-1]
if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=["""Test Location""", """Num Failed"""],
tablefmt=hf_table_format,
stralign="""right""",
)
message += F"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
lowercase__ = """Too many failed tests, please see the full report in the Action results."""
lowercase__ = len(err) + 10
lowercase__ = message[: 3000 - offset] + F"\n...\n```\n{err}"
print(F"### {message}")
else:
lowercase__ = """No failed tests! 🤗"""
print(F"## {message}")
payload.append(no_error_payload)
if os.environ.get("""TEST_TYPE""", """""") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["""SLACK_API_TOKEN"""])
if message != "No failed tests! 🤗":
        md_report = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": message,
},
}
payload.append(md_report)
        action_button = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": """*For more details:*""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {
"""type""": """plain_text""",
"""text""": """Check Action results""",
"""emoji""": True,
},
"""url""": F"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
    date_report = {
"""type""": """context""",
"""elements""": [
{
"""type""": """plain_text""",
"""text""": F"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
}
],
}
payload.append(date_report)
    response = client.chat_postMessage(channel="""#accelerate-ci-daily""", text=message, blocks=payload)
    ts = response.data["""ts"""]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
lowercase__ = """"""
for i, row in enumerate(test_failures):
if row[0] != test_class:
                    test_class = row[0]
else:
lowercase__ = """"""
lowercase__ = {
"""type""": """section""",
"""text""": {
"""type""": """mrkdwn""",
"""text""": F"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
},
}
client.chat_postMessage(
channel="""#accelerate-ci-daily""",
thread_ts=ts,
blocks=[payload],
) | 12 | 1 |
"""simple docstring"""
from __future__ import annotations
def find_max( nums , left , right ):
    if len(nums ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
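    # Illustrative check (added, not part of the original doctests): the
    # divide-and-conquer search returns the largest element in the given range.
    assert find_max([1, 5, 3, 9, 2] , 0 , 4 ) == 9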
doctest.testmod(verbose=True) | 12 |
"""simple docstring"""
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """AutoTokenizer"""
lowerCamelCase__ = ["""tokenizer"""]
lowerCamelCase__ = {
"""semantic_prompt""": 1,
"""coarse_prompt""": 2,
"""fine_prompt""": 2,
}
    def __init__( self , tokenizer , speaker_embeddings=None ):
        super().__init__(tokenizer )
        self.speaker_embeddings = speaker_embeddings
    @classmethod
    def from_pretrained( cls , pretrained_processor_name_or_path , speaker_embeddings_dict_path="speaker_embeddings_path.json" , **kwargs ):
        if speaker_embeddings_dict_path is not None:
            speaker_embeddings_path = get_file_from_repo(
                pretrained_processor_name_or_path , speaker_embeddings_dict_path , subfolder=kwargs.pop('subfolder' , None ) , cache_dir=kwargs.pop('cache_dir' , None ) , force_download=kwargs.pop('force_download' , False ) , proxies=kwargs.pop('proxies' , None ) , resume_download=kwargs.pop('resume_download' , False ) , local_files_only=kwargs.pop('local_files_only' , False ) , use_auth_token=kwargs.pop('use_auth_token' , None ) , revision=kwargs.pop('revision' , None ) , )
            if speaker_embeddings_path is None:
                logger.warning(
                    F'''`{os.path.join(pretrained_processor_name_or_path , speaker_embeddings_dict_path )}` does not exist
                    , no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
                    dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
                speaker_embeddings = None
            else:
                with open(speaker_embeddings_path ) as speaker_embeddings_json:
                    speaker_embeddings = json.load(speaker_embeddings_json )
        else:
            speaker_embeddings = None
        tokenizer = AutoTokenizer.from_pretrained(pretrained_processor_name_or_path , **kwargs )
        return cls(tokenizer=tokenizer , speaker_embeddings=speaker_embeddings )
def A_ ( self , lowercase , lowercase="speaker_embeddings_path.json" , lowercase="speaker_embeddings" , lowercase = False , **lowercase , ):
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(lowercase , lowercase , 'v2' ) , exist_ok=lowercase )
_lowerCamelCase : int = {}
_lowerCamelCase : List[Any] = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
_lowerCamelCase : Optional[Any] = self._load_voice_preset(lowercase )
_lowerCamelCase : Any = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , lowercase , F'''{prompt_key}_{key}''' ) , voice_preset[key] , allow_pickle=lowercase , )
_lowerCamelCase : List[str] = os.path.join(lowercase , F'''{prompt_key}_{key}.npy''' )
_lowerCamelCase : Optional[Any] = tmp_dict
with open(os.path.join(lowercase , lowercase ) , 'w' ) as fp:
json.dump(lowercase , lowercase )
super().save_pretrained(lowercase , lowercase , **lowercase )
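    # Note (added): speaker embeddings are saved as individual .npy files plus a
    # JSON index mapping each preset to its semantic/coarse/fine prompt arrays.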
    def _load_voice_preset( self , voice_preset = None , **kwargs ):
        voice_preset_paths = self.speaker_embeddings[voice_preset]
        voice_preset_dict = {}
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset_paths:
                raise ValueError(
                    F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
            path = get_file_from_repo(
                self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , None ) , cache_dir=kwargs.pop('cache_dir' , None ) , force_download=kwargs.pop('force_download' , False ) , proxies=kwargs.pop('proxies' , None ) , resume_download=kwargs.pop('resume_download' , False ) , local_files_only=kwargs.pop('local_files_only' , False ) , use_auth_token=kwargs.pop('use_auth_token' , None ) , revision=kwargs.pop('revision' , None ) , )
            if path is None:
                raise ValueError(
                    F'''`{os.path.join(self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] )}` does not exist
                    , no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
                    embeddings.''' )
            voice_preset_dict[key] = np.load(path )
        return voice_preset_dict
    def _validate_voice_preset_dict( self , voice_preset = None ):
        for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
            if key not in voice_preset:
                raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
            if not isinstance(voice_preset[key] , np.ndarray ):
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
            if len(voice_preset[key].shape ) != self.preset_shape[key]:
                raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
    def __call__( self , text=None , voice_preset=None , return_tensors="pt" , max_length=256 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , **kwargs , ):
        if voice_preset is not None and not isinstance(voice_preset , dict ):
            if (
                isinstance(voice_preset , str )
                and self.speaker_embeddings is not None
                and voice_preset in self.speaker_embeddings
            ):
                voice_preset = self._load_voice_preset(voice_preset )
            else:
                if isinstance(voice_preset , str ) and not voice_preset.endswith('.npz' ):
                    voice_preset = voice_preset + '.npz'
                voice_preset = np.load(voice_preset )
        if voice_preset is not None:
            self._validate_voice_preset_dict(voice_preset , **kwargs )
            voice_preset = BatchFeature(data=voice_preset , tensor_type=return_tensors )
        encoded_text = self.tokenizer(
            text , return_tensors=return_tensors , padding='max_length' , max_length=max_length , return_attention_mask=return_attention_mask , return_token_type_ids=return_token_type_ids , add_special_tokens=add_special_tokens , **kwargs , )
        if voice_preset is not None:
            encoded_text['history_prompt'] = voice_preset
        return encoded_text | 12 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_data2vec_audio"""] = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
    _import_structure["""modeling_data2vec_text"""] = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
    _import_structure["""modeling_data2vec_vision"""] = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
    _import_structure["""modeling_tf_data2vec_vision"""] = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 12 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
lowercase__ = False
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
pass
@slow
@require_torch_gpu
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained('shi-labs/versatile-diffusion' )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        image_prompt = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg' )
        generator = torch.manual_seed(0 )
        image = pipe(
            image=image_prompt , generator=generator , guidance_scale=7.5 , num_inference_steps=50 , output_type='numpy' , ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 12 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """philschmid/bart-large-cnn-samsum"""
lowerCamelCase__ = (
"""This is a tool that summarizes an English text. It takes an input `text` containing the text to summarize, """
"""and returns a summary of the text."""
)
lowerCamelCase__ = """summarizer"""
lowerCamelCase__ = AutoTokenizer
lowerCamelCase__ = AutoModelForSeqaSeqLM
lowerCamelCase__ = ["""text"""]
lowerCamelCase__ = ["""text"""]
    def encode( self , text ):
        return self.pre_processor(text , return_tensors='pt' , truncation=True )
    def forward( self , inputs ):
        return self.model.generate(**inputs )[0]
    def decode( self , outputs ):
        return self.pre_processor.decode(outputs , skip_special_tokens=True , clean_up_tokenization_spaces=True ) | 12 |
"""simple docstring"""
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
lowercase__ = {
"""E""": 12.70,
"""T""": 9.06,
"""A""": 8.17,
"""O""": 7.51,
"""I""": 6.97,
"""N""": 6.75,
"""S""": 6.33,
"""H""": 6.09,
"""R""": 5.99,
"""D""": 4.25,
"""L""": 4.03,
"""C""": 2.78,
"""U""": 2.76,
"""M""": 2.41,
"""W""": 2.36,
"""F""": 2.23,
"""G""": 2.02,
"""Y""": 1.97,
"""P""": 1.93,
"""B""": 1.29,
"""V""": 0.98,
"""K""": 0.77,
"""J""": 0.15,
"""X""": 0.15,
"""Q""": 0.10,
"""Z""": 0.07,
}
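# The values are percentage frequencies of each letter in typical English text;
# ETAOIN below lists the same 26 letters from most to least frequent.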
lowercase__ = """ETAOINSHRDLCUMWFGYPBVKJXQZ"""
lowercase__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def get_letter_count( message ):
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count
def get_item_at_index_zero( x ):
    return x[0]
def get_frequency_order( message ):
    letter_to_freq = get_letter_count(message )
    freq_to_letter : dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter )
    freq_to_letter_str : dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find , reverse=True )
        freq_to_letter_str[freq] = ''.join(freq_to_letter[freq] )
    freq_pairs = list(freq_to_letter_str.items() )
    freq_pairs.sort(key=get_item_at_index_zero , reverse=True )
    sorted_freq_to_letter : list[str] = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter )
def english_freq_match_score( message ):
    freq_order = get_frequency_order(message )
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
if __name__ == "__main__":
import doctest
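    # Illustrative check (added): scores how closely the letter frequencies of a
    # sample text match typical English; the result is an integer from 0 to 12.
    print(english_freq_match_score('Hello, this is a sample message written in plain English.' ) )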
doctest.testmod() | 12 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = BlipImageProcessor()
        tokenizer = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
        processor = BlipaProcessor(image_processor , tokenizer )
processor.save_pretrained(self.tmpdirname )
def A_ ( self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def A_ ( self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def A_ ( self ):
shutil.rmtree(self.tmpdirname )
def A_ ( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def A_ ( self ):
        processor = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = BlipaProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , PreTrainedTokenizerFast )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , BlipImageProcessor )
def A_ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors='np' )
        input_processor = processor(images=image_input , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def A_ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str , return_token_type_ids=False )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def A_ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
        with pytest.raises(ValueError ):
processor()
def A_ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
def A_ ( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = BlipaProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = 'lower newer'
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] ) | 12 |
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ :
'''simple docstring'''
    def __init__( self , question_encoder , generator ):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder
    def save_pretrained( self , save_directory ):
        if os.path.isfile(save_directory ):
            raise ValueError(F'''Provided path ({save_directory}) should be a directory, not a file''' )
        os.makedirs(save_directory , exist_ok=True )
        question_encoder_path = os.path.join(save_directory , 'question_encoder_tokenizer' )
        generator_path = os.path.join(save_directory , 'generator_tokenizer' )
        self.question_encoder.save_pretrained(question_encoder_path )
        self.generator.save_pretrained(generator_path )
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer
        config = kwargs.pop('config' , None )
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path )
        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.question_encoder , subfolder='question_encoder_tokenizer' )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path , config=config.generator , subfolder='generator_tokenizer' )
        return cls(question_encoder=question_encoder , generator=generator )
    def __call__( self , *args , **kwargs ):
        return self.current_tokenizer(*args , **kwargs )
    def batch_decode( self , *args , **kwargs ):
        return self.generator.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.generator.decode(*args , **kwargs )
    def _switch_to_input_mode( self ):
        self.current_tokenizer = self.question_encoder
    def _switch_to_target_mode( self ):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch( self , src_texts , tgt_texts = None , max_length = None , max_target_length = None , padding = "longest" , return_tensors = None , truncation = True , **kwargs , ):
        warnings.warn(
            '`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
            'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
            'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
            'details' , FutureWarning , )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts , add_special_tokens=True , return_tensors=return_tensors , max_length=max_length , padding=padding , truncation=truncation , **kwargs , )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts , add_special_tokens=True , return_tensors=return_tensors , padding=padding , max_length=max_target_length , truncation=truncation , **kwargs , )
        model_inputs['labels'] = labels['input_ids']
        return model_inputs | 12 | 1 |
"""simple docstring"""
from __future__ import annotations
from PIL import Image
# Define glider example
lowercase__ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
lowercase__ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
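# The blinker is a period-2 oscillator: the column of three live cells flips to a
# row of three and back on alternating generations.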
def new_generation( cells ):
    next_generation = []
    for i in range(len(cells ) ):
        next_generation_row = []
        for j in range(len(cells[i] ) ):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i] ) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells ) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells ) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells ) - 1 and j < len(cells[i] ) - 1:
                neighbour_count += cells[i + 1][j + 1]
            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            # Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (
                (alive and 2 <= neighbour_count <= 3)
                or not alive
                and neighbour_count == 3
            ):
                next_generation_row.append(1 )
            else:
                next_generation_row.append(0 )
        next_generation.append(next_generation_row )
    return next_generation
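# Note (added): cells outside the grid are treated as dead; the bounds checks in
# new_generation simply skip neighbours that would fall off the board.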
def generate_images( cells , frames ):
    images = []
    for _ in range(frames ):
        # Create output image
        img = Image.new('RGB' , (len(cells[0] ), len(cells )) )
        pixels = img.load()
        # Save cells to image
        for x in range(len(cells ) ):
            for y in range(len(cells[0] ) ):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)
        # Save image
        images.append(img )
        cells = new_generation(cells )
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
images[0].save("""out.gif""", save_all=True, append_images=images[1:]) | 12 |
"""simple docstring"""
def solution( n = 10 ):
    if not isinstance(n , int ) or n < 0:
        raise ValueError('Invalid input' )
    modulus = 10**n
    number = 28433 * (pow(2 , 7830457 , modulus )) + 1
    return str(number % modulus )
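# Note (added): pow(2, 7830457, modulus) is modular exponentiation, so only the
# last n digits are ever carried through the computation.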
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"{solution(10) = }") | 12 | 1 |