Dataset schema (one row pairs a `code` sample with a `style_context` sample):
- code: string, length 81 to 54k
- code_codestyle: int64, 0 to 721
- style_context: string, length 91 to 41.9k
- style_context_codestyle: int64, 0 to 699
- label: int64, 0 or 1
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn

from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging

logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    """Wrapper that runs several `ControlNetModel`s and sums their residuals."""

    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.Tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample=sample,
                timestep=timestep,
                encoder_hidden_states=encoder_hidden_states,
                controlnet_cond=image,
                conditioning_scale=scale,
                class_labels=class_labels,
                timestep_cond=timestep_cond,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                guess_mode=guess_mode,
                return_dict=return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: str,
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f'_{idx}'

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f'_{idx}'
        logger.info(f'{len(controlnets)} controlnets loaded from {pretrained_model_path}.')
        if len(controlnets) == 0:
            raise ValueError(
                f'No ControlNets found under {os.path.dirname(pretrained_model_path)}. '
                f'Expected at least {pretrained_model_path + "_0"}.')
        return cls(controlnets)
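
A minimal usage sketch for the class above (assumption: the surrounding diffusers pipeline API; the checkpoint ids are illustrative):

from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet_pose = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-openpose')
controlnet_canny = ControlNetModel.from_pretrained('lllyasviel/sd-controlnet-canny')
# Passing a list of ControlNets makes the pipeline wrap them in MultiControlNetModel,
# so their residuals are summed exactly as in `forward` above.
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    'runwayml/stable-diffusion-v1-5', controlnet=[controlnet_pose, controlnet_canny]
)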
# [row 1] code_codestyle: 713; style_context sample follows
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_shape_file'])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['word_pronunciation_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.word_shape_file, 'w', encoding='utf-8') as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, 'w', encoding='utf-8') as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize('你好[SEP]你是谁')
        self.assertListEqual(tokens, ['你', '好', '[SEP]', '你', '是', '谁'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode('你好', add_special_tokens=False)
        text_2 = tokenizer.encode('你是谁', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                string_sequence = '你好,你是谁'
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
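
A short usage sketch of the three parallel id streams the tests above exercise (assumption: the public checkpoint weiweishi/roc-bert-base-zh):

from transformers import RoCBertTokenizer

tokenizer = RoCBertTokenizer.from_pretrained('weiweishi/roc-bert-base-zh')
tokens = tokenizer.tokenize('你好,你是谁')
input_ids = tokenizer.convert_tokens_to_ids(tokens)
shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)                  # glyph-shape stream
pronunciation_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)  # pinyin stream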
# [row 1] style_context_codestyle: 682, label: 0; next row's code sample follows
import os
import zipfile

import requests
from get_ci_error_statistics import download_artifact, get_artifacts_links


def get_daily_ci_runs(token, num_runs=7):
    """Get the workflow runs of the scheduled (daily) CI."""
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    # The id of a workflow (not of a workflow run)
    workflow_id = '636036'
    url = f'https://api.github.com/repos/huggingface/transformers/actions/workflows/{workflow_id}/runs'
    # On `main` branch + event being `schedule` + not returning PRs + only `num_runs` results
    url += f'?branch=main&event=schedule&exclude_pull_requests=true&per_page={num_runs}'
    result = requests.get(url, headers=headers).json()
    return result['workflow_runs']


def get_last_daily_ci_runs(token):
    """Get the id of the last completed scheduled (daily) CI run."""
    workflow_runs = get_daily_ci_runs(token)
    workflow_run_id = None
    for workflow_run in workflow_runs:
        if workflow_run['status'] == 'completed':
            workflow_run_id = workflow_run['id']
            break
    return workflow_run_id


def get_last_daily_ci_artifacts(artifact_names, output_dir, token):
    """Download the requested artifacts of the last completed daily CI run."""
    workflow_run_id = get_last_daily_ci_runs(token)
    if workflow_run_id is not None:
        artifacts_links = get_artifacts_links(worflow_run_id=workflow_run_id, token=token)
        for artifact_name in artifact_names:
            if artifact_name in artifacts_links:
                artifact_url = artifacts_links[artifact_name]
                download_artifact(
                    artifact_name=artifact_name, artifact_url=artifact_url, output_dir=output_dir, token=token)


def get_last_daily_ci_reports(artifact_names, output_dir, token):
    """Download and unzip the text reports contained in the daily CI artifacts."""
    get_last_daily_ci_artifacts(artifact_names, output_dir, token)
    results = {}
    for artifact_name in artifact_names:
        artifact_zip_path = os.path.join(output_dir, f'{artifact_name}.zip')
        if os.path.isfile(artifact_zip_path):
            results[artifact_name] = {}
            with zipfile.ZipFile(artifact_zip_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        with z.open(filename) as f:
                            results[artifact_name][filename] = f.read().decode('UTF-8')
    return results
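
A usage sketch (assumption: a GitHub token with read access to Actions artifacts; the artifact name is illustrative):

if __name__ == '__main__':
    token = os.environ.get('GITHUB_TOKEN')
    reports = get_last_daily_ci_reports(['run_models_gpu'], output_dir='ci_artifacts', token=token)
    for name, files in reports.items():
        print(name, list(files))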
# [row 2] code_codestyle: 714; style_context sample follows
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            'feature_size': self.feature_size,
            'hop_length': self.hop_length,
            'chunk_length': self.chunk_length,
            'padding_value': self.padding_value,
            'sampling_rate': self.sampling_rate,
            'return_attention_mask': self.return_attention_mask,
            'do_normalize': self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding='max_length', return_tensors='np').input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors='np').input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='np')
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{'input_features': inputs}], return_tensors='pt')
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset('hf-internal-testing/librispeech_asr_dummy', 'clean', split='validation')
        # automatic decoding with librispeech
        speech_samples = ds.sort('id').select(range(num_samples))[:num_samples]['audio']
        return [x['array'] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors='pt').input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
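
A usage sketch of the feature extractor with its real-world defaults (16 kHz audio, 80 mel bins, 30-second windows), consistent with the shape asserted in test_integration above:

import numpy as np
from transformers import WhisperFeatureExtractor

fe = WhisperFeatureExtractor()  # defaults: feature_size=80, sampling_rate=16000, chunk_length=30
audio = np.random.randn(16000).astype(np.float32)  # 1 second of fake audio at 16 kHz
features = fe(audio, sampling_rate=16000, return_tensors='np').input_features
print(features.shape)  # (1, 80, 3000): 80 mel bins, 30 s of 10 ms frames after padding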
# [row 2] style_context_codestyle: 682, label: 0; next row's code sample follows
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class ScoreSdeVePipeline(DiffusionPipeline):
    """Unconditional image generation with a score-based (VE-SDE) model."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = 'pil',
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)
        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)
            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample
            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)
            sample, sample_mean = output.prev_sample, output.prev_sample_mean
        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == 'pil':
            sample = self.numpy_to_pil(sample)
        if not return_dict:
            return (sample,)
        return ImagePipelineOutput(images=sample)
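
A usage sketch for the pipeline (assumption: one of the published NCSN++ checkpoints; the id below is illustrative):

from diffusers import ScoreSdeVePipeline

pipe = ScoreSdeVePipeline.from_pretrained('google/ncsnpp-church-256')
image = pipe(num_inference_steps=2000).images[0]
image.save('sde_ve_sample.png')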
# [row 3] code_codestyle: 715; style_context sample follows
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.asm = False
        self.causal = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = 'last'
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertModel(config=config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels,
        is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            'feature-extraction': TFFlaubertModel,
            'fill-mask': TFFlaubertWithLMHeadModel,
            'question-answering': TFFlaubertForQuestionAnsweringSimple,
            'text-classification': TFFlaubertForSequenceClassification,
            'token-classification': TFFlaubertForTokenClassification,
            'zero-shot': TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == 'QAPipelineTests'
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased')
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
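
The integration test above also documents the input format; a minimal inference sketch (checkpoint id as used in the test, dummy `langs`/`lengths` values for illustration):

import tensorflow as tf
from transformers import TFFlaubertModel

model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased')
input_ids = tf.constant([[0, 158, 735, 2592, 1424, 6727, 82, 1]])  # "J'aime flaubert !"
# `lengths` gives the true (unpadded) length per sequence; `langs` selects the language embedding.
outputs = model({'input_ids': input_ids, 'lengths': tf.constant([8]), 'langs': tf.zeros_like(input_ids)})
print(outputs.last_hidden_state.shape)  # (1, 8, 512), as asserted in the integration test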
# [row 3] style_context_codestyle: 682, label: 0; next row's code sample follows
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class SimpleImageProcessor(BaseImageProcessor):
    # NOTE: the original class name was lost in the style transform; `SimpleImageProcessor` is a
    # hypothetical stand-in. The base class is recovered from the `BaseImageProcessor` import above.
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize=True,
        size=None,
        resample=PIL.Image.BICUBIC,
        do_center_crop=True,
        crop_size=None,
        rescale_factor=1 / 255,
        do_rescale=True,
        do_normalize=True,
        image_mean=None,
        image_std=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 256, 'width': 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {'height': 224, 'width': 224}
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(self, image, size, resample=PIL.Image.BICUBIC, data_format=None, **kwargs):
        size = get_size_dict(size)
        if 'height' not in size or 'width' not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size['height'], size['width']), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image, size, data_format=None, **kwargs):
        size = get_size_dict(size)
        if 'height' not in size or 'width' not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize=None,
        size=None,
        resample=None,
        do_center_crop=None,
        crop_size=None,
        do_rescale=None,
        rescale_factor=None,
        do_normalize=None,
        image_mean=None,
        image_std=None,
        return_tensors=None,
        data_format=ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data, tensor_type=return_tensors)
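
A quick sketch of the full transform chain (uses the hypothetical class name introduced above):

import numpy as np

processor = SimpleImageProcessor()
image = np.zeros((512, 512, 3), dtype=np.uint8)
batch = processor(images=image, return_tensors='np')
# resize to 256x256 -> center-crop to 224x224 -> rescale by 1/255 -> normalize -> channels-first
print(batch['pixel_values'].shape)  # (1, 3, 224, 224)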
# [row 4] code_codestyle: 716; style_context sample follows
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch('socket.socket')
@patch('builtins.open')
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
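
For context, a hypothetical shape of the `send_file` function under test, inferred only from the mocks and asserts above (not the project's actual implementation):

import socket


def send_file_sketch(filename: str = 'mytext.txt', testing: bool = False) -> None:
    port = 12312  # assumption: any free port
    sock = socket.socket()
    sock.bind(('localhost', port))
    sock.listen(1)
    conn, _ = sock.accept()
    conn.recv(1024)  # wait for the client's request
    with open(filename, 'rb') as in_file:
        data = in_file.read(1024)
        while data:  # with the mocked reads (1, then None), send() fires exactly once
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(socket.SHUT_RDWR)
    sock.close()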
# [row 4] style_context_codestyle: 682, label: 0; next row's code sample follows
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16, model_name='bert-base-cased'):
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding='max_length', max_length=128, return_tensors='pt')
        return tokenizer.pad(examples, padding='longest', return_tensors='pt')

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args):
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or 'scheduler' not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps)
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load('glue', 'mrpc')
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(predictions=predictions, references=references)
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
        performance_metric[f'epoch-{epoch}'] = eval_metric['accuracy']
        if best_performance < eval_metric['accuracy']:
            best_performance = eval_metric['accuracy']

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, 'all_results.json'), 'w') as f:
            json.dump(performance_metric, f)
def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        default='bert-base-cased',
        help='Path to pretrained model or model identifier from huggingface.co/models.',
        required=False,
    )
    parser.add_argument(
        '--output_dir',
        type=str,
        default='.',
        help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.',
    )
    parser.add_argument(
        '--performance_lower_bound',
        type=float,
        default=None,
        help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.',
    )
    parser.add_argument(
        '--num_epochs',
        type=int,
        default=3,
        help='Number of train epochs.',
    )
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == '__main__':
    main()
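
Usage note: with the DeepSpeed plugin this script is normally started through the accelerate launcher rather than plain python. The command below is illustrative and the config file name is an assumption:

# accelerate launch --config_file deepspeed_config.yaml this_script.py \
#     --model_name_or_path bert-base-cased --num_epochs 3 --performance_lower_bound 0.80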
# [row 5] code_codestyle: 717; style_context sample follows
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n) == function(x_n1):
            raise ZeroDivisionError('float division by zero, could not find root')
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
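
A second usage sketch, applying the same secant iteration to another root-finding problem:

# Root of cos(x) - x, starting from the bracket [0, 1]:
# root = intersection(lambda x: math.cos(x) - x, 0.0, 1.0)
# print(round(root, 4))  # ~0.7391, the Dottie number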
# [row 5] style_context_codestyle: 682, label: 0; next row's code sample follows
def base16_encode(data: bytes) -> str:
    """Encode bytes as an uppercase base16 (hex) string."""
    return ''.join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back into bytes."""
    if (len(data) % 2) != 0:
        raise ValueError(
            'Base16 encoded data is invalid:\nData does not have an even number of hex digits.')
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set('0123456789ABCDEF'):
        raise ValueError(
            'Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.')
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
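
A round-trip usage example for the two functions above:

# encoded = base16_encode(b'Hello World!')
# print(encoded)                 # 48656C6C6F20576F726C6421
# print(base16_decode(encoded))  # b'Hello World!'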
# [row 6] code_codestyle: 718; style_context sample follows
import logging
import random

import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex

logger = logging.getLogger(__name__)


class RayRetriever:
    def __init__(self):
        self.initialized = False

    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True

    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds


class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
                'When using Ray for distributed fine-tuning, '
                "you'll need to provide the paths instead, "
                'as the dataset and the index are loaded '
                'separately. More info in examples/rag/use_own_knowledge_dataset.py ')
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(
                        config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ])

    def init_retrieval(self):
        logger.info('initializing retrieval')
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop('config', None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = 'custom'
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
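
A usage sketch of the distributed setup (assumption: mirrors the actor creation in the RAG fine-tuning example):

# import ray
#
# ray.init()
# workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
# retriever = RagRayDistributedRetriever.from_pretrained('facebook/rag-token-nq', actor_handles=workers)
# retriever.init_retrieval()  # each actor loads and initializes the index once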
# [row 6] style_context_codestyle: 682, label: 0; next row's code sample follows
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1000,
            'beta_start': 0.0001,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ['linear', 'squaredcos_cap_v2']:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ['fixed_small', 'fixed_large', 'other']:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ['epsilon', 'sample', 'v_prediction']:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold)

    def test_prediction_type(self):
        for prediction_type in ['epsilon', 'sample', 'v_prediction']:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.scheduler_classes[0]
__UpperCamelCase =self.get_scheduler_config()
__UpperCamelCase =scheduler_class(**__UpperCamelCase )
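        # non-uniform custom timesteps; previous_timestep should return the next entry, or -1 after the last one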
__UpperCamelCase =[100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__UpperCamelCase )
__UpperCamelCase =scheduler.timesteps
for i, timestep in enumerate(__UpperCamelCase ):
if i == len(__UpperCamelCase ) - 1:
__UpperCamelCase =-1
else:
__UpperCamelCase =timesteps[i + 1]
__UpperCamelCase =scheduler.previous_timestep(__UpperCamelCase )
__UpperCamelCase =prev_t.item()
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.scheduler_classes[0]
__UpperCamelCase =self.get_scheduler_config()
__UpperCamelCase =scheduler_class(**__UpperCamelCase )
__UpperCamelCase =[100, 87, 50, 51, 0]
        with self.assertRaises(ValueError , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.scheduler_classes[0]
__UpperCamelCase =self.get_scheduler_config()
__UpperCamelCase =scheduler_class(**__UpperCamelCase )
__UpperCamelCase =[100, 87, 50, 1, 0]
__UpperCamelCase =len(__UpperCamelCase )
        with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.scheduler_classes[0]
__UpperCamelCase =self.get_scheduler_config()
__UpperCamelCase =scheduler_class(**__UpperCamelCase )
__UpperCamelCase =[scheduler.config.num_train_timesteps]
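        # a timestep equal to num_train_timesteps is out of range and must be rejected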
with self.assertRaises(
            ValueError , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
| 719 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
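        # e.g. the default image_size=64 gives (64 // 32) ** 2 = 4 patches, so seq_length = 5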
__UpperCamelCase =(self.image_size // 32) ** 2
__UpperCamelCase =num_patches + 1
def _a ( self ) -> str:
        pixel_values =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels =None
        if self.use_labels:
            labels =ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.type_sequence_label_size
__UpperCamelCase =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ) -> List[Any]:
        config_and_inputs =self.prepare_config_and_inputs()
        config , pixel_values , labels =config_and_inputs
        inputs_dict ={'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__UpperCamelCase =[f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' )
__UpperCamelCase =model(**A_ )
__UpperCamelCase =outputs.logits
# model predicts one of the 1000 ImageNet classes
        predicted_class_idx =logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
| 682 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
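# lazy import structure: heavy submodules are only materialized on first attribute access via _LazyModule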
_A = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ["""SqueezeBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 720 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f'Converting {name}...')
    with torch.no_grad():
        if hidden_sizes == 1_28:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        elif hidden_sizes == 1_92:
            from_model = timm.create_model('levit_192', pretrained=True)
        elif hidden_sizes == 2_56:
            from_model = timm.create_model('levit_256', pretrained=True)
        elif hidden_sizes == 3_84:
            from_model = timm.create_model('levit_384', pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # the two state dicts are position-aligned, so copy each timm tensor under the matching HF key
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 2_24, 2_24))
        from_logits = from_model(x)
        our_logits = our_model(x).logits
        assert torch.allclose(from_logits, our_logits), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f'Pushed {checkpoint_name}')
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 10_00
    expected_shape = (1, num_labels)
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
        'levit-128S': 1_28,
        'levit-128': 1_28,
        'levit-192': 1_92,
        'levit-256': 2_56,
        'levit-384': 3_84,
    }
    # per-checkpoint LevitConfig instances keyed by model name
    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[1_28, 2_56, 3_84], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[1_28, 2_56, 3_84], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[1_92, 2_88, 3_84], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[2_56, 3_84, 5_12], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[3_84, 5_12, 7_68], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 682 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def _a ( self ) -> List[str]:
__UpperCamelCase =AutoModelForSeqaSeqLM.from_pretrained('google/mt5-small' , return_dict=_lowercase ).to(_lowercase )
__UpperCamelCase =AutoTokenizer.from_pretrained('google/mt5-small' )
__UpperCamelCase =tokenizer('Hello there' , return_tensors='pt' ).input_ids
__UpperCamelCase =tokenizer('Hi I am' , return_tensors='pt' ).input_ids
__UpperCamelCase =model(input_ids.to(_lowercase ) , labels=labels.to(_lowercase ) ).loss
__UpperCamelCase =-(labels.shape[-1] * loss.item())
__UpperCamelCase =-84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
| 721 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Any:
__UpperCamelCase ='laion/clap-htsat-unfused'
__UpperCamelCase =tempfile.mkdtemp()
def _a ( self , **A_ ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **A_ )
def _a ( self , **A_ ) -> Dict:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **A_ )
def _a ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> str:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> int:
__UpperCamelCase =ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__UpperCamelCase =self.get_feature_extractor(do_normalize=A_ , padding_value=1.0 )
__UpperCamelCase =ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =floats_list((3, 1000) )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' )
__UpperCamelCase =processor(audios=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase ='This is a test string'
__UpperCamelCase =processor(text=A_ )
__UpperCamelCase =tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase =processor.batch_decode(A_ )
__UpperCamelCase =tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 682 | 0 |
'''simple docstring'''
def print_max_activities(start: list[int], finish: list[int]) -> None:
    n = len(finish)
    print('The following activities are selected:')
    # The first activity is always selected
    i = 0
    print(i, end=',')
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=',')
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
_A = [1, 3, 0, 5, 8, 5]
_A = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('test')
    else:
        parser = argparse.ArgumentParser('Accelerate test command')
    parser.add_argument(
        '--config_file', default=None, help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ), )
    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ['test_utils', 'scripts', 'test_script.py'])
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f'--config_file={args.config_file} {script_name}'
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!')
def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)
if __name__ == "__main__":
main()
| 682 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_A = logging.get_logger(__name__)
_A = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
_A = {
'vocab_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/vocab.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/vocab.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/vocab.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/vocab.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/vocab.json',
},
'merges_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/merges.txt',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/merges.txt',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/merges.txt',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/merges.txt',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/merges.txt',
},
'tokenizer_file': {
'gpt2': 'https://huggingface.co/gpt2/resolve/main/tokenizer.json',
'gpt2-medium': 'https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json',
'gpt2-large': 'https://huggingface.co/gpt2-large/resolve/main/tokenizer.json',
'gpt2-xl': 'https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json',
'distilgpt2': 'https://huggingface.co/distilgpt2/resolve/main/tokenizer.json',
},
}
_A = {
'gpt2': 1024,
'gpt2-medium': 1024,
'gpt2-large': 1024,
'gpt2-xl': 1024,
'distilgpt2': 1024,
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = ['input_ids', 'attention_mask']
UpperCAmelCase__ : List[Any] = GPTaTokenizer
def __init__( self , A_=None , A_=None , A_=None , A_="<|endoftext|>" , A_="<|endoftext|>" , A_="<|endoftext|>" , A_=False , **A_ , ) -> List[Any]:
super().__init__(
A_ , A_ , tokenizer_file=A_ , unk_token=A_ , bos_token=A_ , eos_token=A_ , add_prefix_space=A_ , **A_ , )
__UpperCamelCase =kwargs.pop('add_bos_token' , A_ )
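        # rebuild the backend pre-tokenizer if the requested add_prefix_space differs from the serialized one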
__UpperCamelCase =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , A_ ) != add_prefix_space:
__UpperCamelCase =getattr(A_ , pre_tok_state.pop('type' ) )
__UpperCamelCase =add_prefix_space
__UpperCamelCase =pre_tok_class(**A_ )
__UpperCamelCase =add_prefix_space
def _a ( self , *A_ , **A_ ) -> Any:
__UpperCamelCase =kwargs.get('is_split_into_words' , A_ )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A_ , **A_ )
def _a ( self , *A_ , **A_ ) -> Union[str, Any]:
__UpperCamelCase =kwargs.get('is_split_into_words' , A_ )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A_ , **A_ )
def _a ( self , A_ , A_ = None ) -> Any:
__UpperCamelCase =self._tokenizer.model.save(A_ , name=A_ )
return tuple(A_ )
def _a ( self , A_ ) -> Optional[Any]:
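        # flatten the conversation into one id sequence, appending EOS after every turn and truncating from the left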
__UpperCamelCase =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(A_ , add_special_tokens=A_ ) + [self.eos_token_id] )
if len(A_ ) > self.model_max_length:
__UpperCamelCase =input_ids[-self.model_max_length :]
return input_ids
| 701 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
return flax_params
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ={}
__UpperCamelCase ={
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
__UpperCamelCase ={
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
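    # walk the flax "target" parameters and rename each one to its HF PyTorch equivalent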
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__UpperCamelCase ='.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flax_dict[key]
__UpperCamelCase ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__UpperCamelCase =torch.from_numpy(converted_dict[key].T )
else:
__UpperCamelCase =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : str=False ):
__UpperCamelCase =get_flax_param(SCREAMING_SNAKE_CASE__ )
if not use_large:
__UpperCamelCase =PixaStructVisionConfig()
__UpperCamelCase =PixaStructTextConfig()
else:
__UpperCamelCase =PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
__UpperCamelCase =PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
__UpperCamelCase =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =PixaStructForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =rename_and_convert_flax_params(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
__UpperCamelCase =PixaStructImageProcessor()
__UpperCamelCase =PixaStructProcessor(image_processor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
if use_large:
__UpperCamelCase =40_96
__UpperCamelCase =True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
print('Model saved in {}'.format(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 682 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, config_file: str, pytorch_dump_path: str, base_model: bool):
    config = FunnelConfig.from_json_file(config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
_A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_A = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 682 | 0 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }
    # map datetime.weekday() (Monday == 0) onto this implementation's Sunday == 0 indexing
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError('Must be 10 characters long')
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError('Month must be between 1 - 12')
    sep_a = date_input[2]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError('Date must be between 1 - 31')
    # Get second separator
    sep_a = date_input[5]
    # Validate
    if sep_a not in ["-", "/"]:
        raise ValueError('Date separator must be \'-\' or \'/\'')
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 85_00:
        raise ValueError(
            'Year out of range. There has to be some sort of limit...right?')
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # Zeller's congruence terms: c is the century, k the year of the century
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError('The date was evaluated incorrectly. Contact developer.')
    # Response
    response = f'Your date {date_input}, is a {days[str(f)]}!'
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
_A = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
_A = parser.parse_args()
zeller(args.date_input)
| 703 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =99
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =37
__UpperCamelCase ='gelu'
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase =None
def _a ( self ) -> Tuple:
        input_ids =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask =None
        if self.use_input_mask:
            input_mask =random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids =None
        if self.use_token_type_ids:
            token_type_ids =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels =None
        token_labels =None
        choice_labels =None
        if self.use_labels:
            sequence_labels =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels =ids_tensor([self.batch_size] , self.num_choices )
        config =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
__UpperCamelCase =True
__UpperCamelCase =TFRoFormerForCausalLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerForMaskedLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForSequenceClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFRoFormerForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForTokenClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerForQuestionAnswering(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Dict:
        config_and_inputs =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) =config_and_inputs
        inputs_dict ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _a ( self ) -> str:
__UpperCamelCase =TFRoFormerModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Dict:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(A_ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[str]:
__UpperCamelCase =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
__UpperCamelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =model(A_ )[0]
# TODO Replace vocab size
__UpperCamelCase =50000
__UpperCamelCase =[1, 6, vocab_size]
self.assertEqual(output.shape , A_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__UpperCamelCase =tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = 1e-4
def _a ( self ) -> int:
__UpperCamelCase =tf.constant([[4, 10]] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__UpperCamelCase =emba(input_ids.shape )
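        # each row of the table holds the sin values in its first half and the cos values in its second half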
__UpperCamelCase =tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
def _a ( self ) -> int:
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__UpperCamelCase =emba.weight[:3, :5]
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = 1e-4
def _a ( self ) -> List[Any]:
        # dummy tensors of shape (batch=2, heads=12, seq_len=16, head_dim=64)
__UpperCamelCase =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__UpperCamelCase =embed_positions([2, 16, 768] )[None, None, :, :]
__UpperCamelCase , __UpperCamelCase =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A_ , A_ , A_ )
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__UpperCamelCase =tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
| 682 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
_A = logging.get_logger(__name__)
_A = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "bloom"
UpperCAmelCase__ : List[str] = ["past_key_values"]
UpperCAmelCase__ : Tuple = {
"num_hidden_layers": "n_layer",
"num_attention_heads": "n_head",
}
def __init__( self , A_=250880 , A_=64 , A_=2 , A_=8 , A_=1E-5 , A_=0.02 , A_=True , A_=1 , A_=2 , A_=False , A_=0.0 , A_=0.0 , A_=1 , A_=False , **A_ , ) -> List[str]:
__UpperCamelCase =vocab_size
# Backward compatibility with n_embed kwarg
__UpperCamelCase =kwargs.pop('n_embed' , A_ )
__UpperCamelCase =hidden_size if n_embed is None else n_embed
__UpperCamelCase =n_layer
__UpperCamelCase =n_head
__UpperCamelCase =layer_norm_epsilon
__UpperCamelCase =initializer_range
__UpperCamelCase =use_cache
__UpperCamelCase =pretraining_tp
__UpperCamelCase =apply_residual_connection_post_layernorm
__UpperCamelCase =hidden_dropout
__UpperCamelCase =attention_dropout
__UpperCamelCase =bos_token_id
__UpperCamelCase =eos_token_id
__UpperCamelCase =slow_but_exact
super().__init__(bos_token_id=A_ , eos_token_id=A_ , **A_ )
class UpperCAmelCase__ ( lowercase__ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = version.parse("1.12" )
def __init__( self , A_ , A_ = "default" , A_ = None , A_ = False , ) -> int:
super().__init__(A_ , task=A_ , patching_specs=A_ , use_past=A_ )
if not getattr(self._config , 'pad_token_id' , A_ ):
# TODO: how to do that better?
__UpperCamelCase =0
@property
def _a ( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs =OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs , direction='inputs' , inverted_values_shape=True )
            common_inputs['attention_mask'] ={0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs['attention_mask'] ={0: "batch", 1: "sequence"}
        return common_inputs
@property
def _a ( self ) -> int:
return self._config.n_layer
@property
def _a ( self ) -> int:
return self._config.n_head
@property
def _a ( self ) -> float:
return 1E-3
def _a ( self , A_ , A_ = -1 , A_ = -1 , A_ = False , A_ = None , ) -> Mapping[str, Any]:
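        # build GPT-style dummy inputs first, then append zero-filled past key/value tensors when use_past is set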
__UpperCamelCase =super(A_ , self ).generate_dummy_inputs(
A_ , batch_size=A_ , seq_length=A_ , is_pair=A_ , framework=A_ )
# We need to order the input in the way they appears in the forward()
__UpperCamelCase =OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__UpperCamelCase =common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__UpperCamelCase =seqlen + 2
__UpperCamelCase =self._config.hidden_size // self.num_attention_heads
__UpperCamelCase =(
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
__UpperCamelCase =(
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
__UpperCamelCase =[
(torch.zeros(A_ ), torch.zeros(A_ )) for _ in range(self.num_layers )
]
__UpperCamelCase =common_inputs["attention_mask"]
if self.use_past:
__UpperCamelCase =ordered_inputs["attention_mask"].dtype
__UpperCamelCase =torch.cat(
[ordered_inputs['attention_mask'], torch.ones(A_ , A_ , dtype=A_ )] , dim=1 )
return ordered_inputs
@property
def _a ( self ) -> int:
return 13
| 704 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
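        # a numeric operand scales the vector; a Vector operand of equal size yields the dot product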
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def _a ( self , A_ , A_ = False ) -> float:
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of size 'dimension'
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector with a one at index 'pos' (indexing at 0)
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes the axpy operation: scalar * x + y
    assert isinstance(x, Vector) and isinstance(y, Vector) and isinstance(scalar, (int, float))
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    # returns a random vector of size n with integer components between 'a' and 'b'
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
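# A short usage sketch of the vector API above (values are illustrative, and
# the masked `_a` methods are assumed to behave as their call sites suggest):
#   x = Vector([1, 2, 3])
#   y = Vector([4, 5, 6])
#   print(x + y)            # (5,7,9)
#   print(x * y)            # dot product: 4 + 10 + 18 = 32
#   print(axpy(2, x, y))    # 2*x + y = (6,9,12)
#   print(zero_vector(3))   # (0,0,0)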
class UpperCAmelCase__ :
"""simple docstring"""
    def __init__( self , matrix , w , h ) -> None:
        # matrix: list of rows; w: width; h: height
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
def square_zero_matrix(n: int) -> Matrix:
    # returns a square zero matrix of dimension n x n
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    # returns a random matrix of size 'width' x 'height' with integer components between 'a' and 'b'
    random.seed(None)
    matrix = [[random.randint(a, b) for _ in range(width)] for _ in range(height)]
    return Matrix(matrix, width, height)
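# A short usage sketch of the matrix API above, restricted to the operator
# overloads (the named accessors are masked as `_a`, so they are assumed to
# resolve as their call sites suggest; values are illustrative):
#   m = Matrix([[1, 2], [3, 4]], 2, 2)
#   print(m + m)               # |2,4| / |6,8|
#   print(m * Vector([1, 1]))  # row sums: (3,7)
#   print(m * 2)               # every entry doubled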
| 682 | 0 |
'''simple docstring'''
def merge_sort(collection: list) -> list:
    # pure merge sort: returns a new list with the same elements in ascending order
    def merge(left: list, right: list) -> list:
        # lazily merge two sorted lists, always taking the smaller head first
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0 )
            yield from left
            yield from right
        return list(_merge() )
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
_A = input('Enter numbers separated by a comma:\n').strip()
_A = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
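# Quick sanity checks for merge_sort (inputs are illustrative):
#   merge_sort([5, 2, 9, 1])  -> [1, 2, 5, 9]
#   merge_sort([])            -> []
# Ties are taken from the left half first (`<=` in _merge), so the sort is stable.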
| 705 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
_A = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
_A = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
_A = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self , A_ , A_ , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="m2m100" , A_ = None , A_=8 , **A_ , ) -> None:
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase =language_codes
__UpperCamelCase =FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCamelCase ={lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__UpperCamelCase =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A_ )
for lang_code in fairseq_language_code
if self.get_lang_token(A_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A_ , tgt_lang=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , language_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A_ , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =load_json(A_ )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =spm_file
__UpperCamelCase =load_spm(A_ , self.sp_model_kwargs )
__UpperCamelCase =len(self.encoder )
__UpperCamelCase ={
self.get_lang_token(A_ ): self.encoder_size + i for i, lang_code in enumerate(A_ )
}
__UpperCamelCase ={lang_code: self.encoder_size + i for i, lang_code in enumerate(A_ )}
__UpperCamelCase ={v: k for k, v in self.lang_token_to_id.items()}
__UpperCamelCase =src_lang if src_lang is not None else 'en'
__UpperCamelCase =tgt_lang
__UpperCamelCase =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCamelCase =num_madeup_words
@property
def _a ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , A_ ) -> None:
__UpperCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> Optional[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def _a ( self , A_ ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A_ , self.unk_token )
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =[]
__UpperCamelCase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
__UpperCamelCase =[]
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self ) -> Dict:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> None:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
__UpperCamelCase =Path(A_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , A_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A_ )
elif not os.path.isfile(self.spm_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
return (str(A_ ), str(A_ ))
def _a ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def _a ( self , A_ , A_ , A_ , **A_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
__UpperCamelCase =self(A_ , add_special_tokens=A_ , **A_ )
__UpperCamelCase =self.get_lang_id(A_ )
__UpperCamelCase =tgt_lang_id
return inputs
def _a ( self ) -> List[Any]:
self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> str:
return self.lang_code_to_token[lang]
def _a ( self , A_ ) -> int:
__UpperCamelCase =self.get_lang_token(A_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict[str, Any] ):
__UpperCamelCase =sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE__ )
spm.Load(str(SCREAMING_SNAKE_CASE__ ) )
return spm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , indent=2 )
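# A minimal usage sketch, assuming the class above is exposed as transformers'
# M2M100Tokenizer (the checkpoint id is one of the pretrained names listed above):
#   from transformers import M2M100Tokenizer
#   tokenizer = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang='en', tgt_lang='fr')
#   batch = tokenizer('Life is like a box of chocolates.', return_tensors='pt')
#   # input_ids start with the __en__ language token and end with </s>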
| 682 | 0 |
import os
import sys
import unittest
_A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_A = os.path.join(git_repo_path, 'src', 'transformers')
_A = '\n{0} = None\n'
_A = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
_A = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Optional[int]:
__UpperCamelCase =find_backend(' _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")' )
self.assertIsNone(a_ )
__UpperCamelCase =find_backend(' if not is_tokenizers_available():' )
self.assertEqual(a_ , 'tokenizers' )
__UpperCamelCase =find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(a_ , 'tensorflow_text' )
__UpperCamelCase =find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(a_ , 'sentencepiece_and_tokenizers' )
__UpperCamelCase =find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(a_ , 'sentencepiece_and_tensorflow_text' )
__UpperCamelCase =find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(a_ , 'sentencepiece_and_tokenizers_and_vision' )
def _a ( self ) -> List[Any]:
__UpperCamelCase =read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('torch' , a_ )
self.assertIn('tensorflow_text' , a_ )
self.assertIn('sentencepiece_and_tokenizers' , a_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(a_ , '\nCONSTANT = None\n' )
__UpperCamelCase =create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
a_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
__UpperCamelCase ="""
class FakeClass(metaclass=DummyObject):
_backends = 'torch'
def __init__(self, *args, **kwargs):
requires_backends(self, 'torch')
"""
__UpperCamelCase =create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(a_ , a_ )
def _a ( self ) -> Tuple:
__UpperCamelCase ="""# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, [\"torch\"])
class FakeClass(metaclass=DummyObject):
_backends = [\"torch\"]
def __init__(self, *args, **kwargs):
requires_backends(self, [\"torch\"])
"""
__UpperCamelCase =create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , a_ )
| 706 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =original_name.split('.' )[0]
__UpperCamelCase =key.split('.' )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 2] )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 1] )
__UpperCamelCase =orig_block_num - offset
__UpperCamelCase =key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =OrderedDict()
__UpperCamelCase , __UpperCamelCase =0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
__UpperCamelCase =key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
__UpperCamelCase =key[: key.find('proj' )]
__UpperCamelCase =key.replace(SCREAMING_SNAKE_CASE__ , F'patch_embeddings.{total_embed_found}.' )
__UpperCamelCase =key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
__UpperCamelCase ='poolformer.encoder.' + key
if "mlp.fc1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm1' , 'before_norm' )
if "norm2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
__UpperCamelCase =key.replace('head' , 'classifier' )
__UpperCamelCase =value
return new_state_dict
def _UpperCAmelCase ( ):
__UpperCamelCase ='http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase =Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return image
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =PoolFormerConfig()
# set attributes based on model_name
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =model_name[-3:]
__UpperCamelCase =10_00
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =(1, 10_00)
# set config attributes
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
if size == "s12":
__UpperCamelCase =[2, 2, 6, 2]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s24":
__UpperCamelCase =[4, 4, 12, 4]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.9
elif size == "m36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
elif size == "m48":
__UpperCamelCase =[8, 8, 24, 8]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
# Prepare image
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device('cpu' ) )
# rename keys
__UpperCamelCase =rename_keys(SCREAMING_SNAKE_CASE__ )
# create HuggingFace model and load state dict
__UpperCamelCase =PoolFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# Define image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =outputs.logits
# define expected logit slices for different models
if size == "s12":
__UpperCamelCase =torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
__UpperCamelCase =torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
__UpperCamelCase =torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
__UpperCamelCase =torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
__UpperCamelCase =torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_A = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
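# Example invocation (script name and paths are illustrative placeholders):
#   python convert_poolformer_checkpoint.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path /path/to/poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path /path/to/dump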
| 682 | 0 |
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    # decorator that makes a function return its wall-clock runtime instead of its result
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta
    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features, num_examples=1_00, seq_shapes=None):
    # build `num_examples` rows of random data matching the given feature spec
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items() ):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape ).astype(v.dtype )
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = 'The small grey turtle was surprisingly fast when challenged.'
                else:
                    data = np.random.randint(10, size=1 ).astype(v.dtype ).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape ).astype(v.dtype )
            example[k] = data
        dummy_data.append((i, example) )
    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=1_00, seq_shapes=None):
    # write the generated rows to an Arrow file and load them back as a Dataset
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes )
    with ArrowWriter(features=features, path=dataset_path ) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record )
            writer.write(example )
        num_final_examples, num_bytes = writer.finalize()
    if num_final_examples != num_examples:
        raise ValueError(
            f'Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.' )
    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features ) )
    return dataset
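# A minimal sketch of driving the helpers above (the feature spec is illustrative):
#   import datasets
#   features = datasets.Features({'text': datasets.Value('string'), 'score': datasets.Value('float32')})
#   ds = generate_example_dataset('/tmp/bench.arrow', features, num_examples=1_00)
#   assert len(ds) == 1_00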
| 707 |
from math import asin, atan, cos, radians, sin, sqrt, tan
_A = 6_378_137.0
_A = 6_356_752.314_245
_A = 637_8137
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
__UpperCamelCase =(AXIS_A - AXIS_B) / AXIS_A
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
# Equation
__UpperCamelCase =sin((phi_a - phi_a) / 2 )
__UpperCamelCase =sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__UpperCamelCase =sqrt(sin_sq_phi + (cos(SCREAMING_SNAKE_CASE__ ) * cos(SCREAMING_SNAKE_CASE__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
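# Example (San Francisco -> New York; the masked function above is assumed to
# be the usual haversine_distance helper):
#   meters = haversine_distance(37.774856, -122.424227, 40.713019, -74.012647)
#   print(f'{meters / 1000:.0f} km')  # roughly 4,100 km along the great circle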
| 682 | 0 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class UpperCAmelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = ComputeEnvironment.AMAZON_SAGEMAKER
UpperCAmelCase__ : List[Any] = True
UpperCAmelCase__ : Union[str, Any] = "ml.p3.2xlarge"
UpperCAmelCase__ : Tuple = "accelerate_sagemaker_execution_role"
UpperCAmelCase__ : Tuple = "hf-sm"
UpperCAmelCase__ : List[str] = "us-east-1"
UpperCAmelCase__ : List[str] = 1
UpperCAmelCase__ : str = "accelerate-sagemaker-1"
UpperCAmelCase__ : Optional[int] = "1.6"
UpperCAmelCase__ : Union[str, Any] = "4.4"
UpperCAmelCase__ : Tuple = "train.py"
UpperCAmelCase__ : Any = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
UpperCAmelCase__ : Any = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> int:
# If no defaults are changed, `to_kwargs` returns an empty dict.
__UpperCamelCase =_convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
assert isinstance(converted_args['model_name_or_path'] , lowercase_ )
assert isinstance(converted_args['do_train'] , lowercase_ )
assert isinstance(converted_args['epochs'] , lowercase_ )
assert isinstance(converted_args['learning_rate'] , lowercase_ )
assert isinstance(converted_args['max_steps'] , lowercase_ )
with pytest.raises(lowercase_ ):
_convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
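# For MockLaunchConfig.success_training_script_args, the conversion is expected
# to yield, as a sketch (types matching the assertions above):
#   {'model_name_or_path': 'bert', 'do_train': False, 'epochs': 3,
#    'learning_rate': 5e-5, 'max_steps': 50.5}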
| 708 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
return 1 if input_a == input_a else 0
def _UpperCAmelCase ( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
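# For reference, XNOR is the negation of XOR, so an equivalent bitwise form
# for 0/1 inputs would be (illustrative alternative, not part of the module):
#   def xnor_gate_bitwise(input_a: int, input_b: int) -> int:
#       return 1 - (input_a ^ input_b)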
| 682 | 0 |
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase__ ( a__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = CLIPTokenizer
UpperCAmelCase__ : Tuple = CLIPTokenizerFast
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : int = False
def _a ( self ) -> Any:
super().setUp()
# fmt: off
__UpperCamelCase =["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
__UpperCamelCase =dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__UpperCamelCase =["#version: 0.2", "l o", "lo w</w>", "e r</w>"]
__UpperCamelCase ={"unk_token": "<unk>"}
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase__ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase__ ) )
def _a ( self , **A_ ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _a ( self , **A_ ) -> int:
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase__ )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase ="lower newer"
__UpperCamelCase ="lower newer"
return input_text, output_text
def _a ( self ) -> str:
__UpperCamelCase =CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__UpperCamelCase ="lower newer"
__UpperCamelCase =["lo", "w", "er</w>", "n", "e", "w", "er</w>"]
__UpperCamelCase =tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
__UpperCamelCase =tokens + [tokenizer.unk_token]
__UpperCamelCase =[10, 2, 16, 9, 3, 2, 16, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
@require_ftfy
def _a ( self ) -> Dict:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__ , **lowerCAmelCase__ )
__UpperCamelCase ="A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d."
__UpperCamelCase =tokenizer_s.tokenize(lowerCAmelCase__ )
__UpperCamelCase =tokenizer_r.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
__UpperCamelCase ="xa\u0303y" + " " + "x\xe3y"
__UpperCamelCase =tokenizer_s.tokenize(lowerCAmelCase__ )
__UpperCamelCase =tokenizer_r.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Test that the tokenization is identical on unicode of space type
__UpperCamelCase =[
"\u0009", # (horizontal tab, '\t')
"\u000B", # (vertical tab)
"\u000C", # (form feed)
"\u0020", # (space, ' ')
"\u200E", # (left-to-right mark):w
"\u200F", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
__UpperCamelCase =tokenizer_s.tokenize(lowerCAmelCase__ )
__UpperCamelCase =tokenizer_r.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
# Test that the tokenization is identical on unicode of line break type
__UpperCamelCase =[
"\u000A", # (line feed, '\n')
"\r\n", # (carriage return and line feed, '\r\n')
"\u000D", # (carriage return, '\r')
"\r", # (carriage return, '\r')
"\u000D", # (carriage return, '\r')
"\u2028", # (line separator)
"\u2029", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
__UpperCamelCase =tokenizer_s.tokenize(lowerCAmelCase__ )
__UpperCamelCase =tokenizer_r.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
def _a ( self ) -> List[str]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase ="hello" # `hello` is a token in the vocabulary of `pretrained_name`
__UpperCamelCase =f'{text_of_1_token} {text_of_1_token}'
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , )
__UpperCamelCase =tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(lowerCAmelCase__ ) + 1, len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
__UpperCamelCase =f' {text}'
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(
lowerCAmelCase__ , use_fast=lowerCAmelCase__ , )
__UpperCamelCase =tokenizer_r(lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(lowerCAmelCase__ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(lowerCAmelCase__ ) + 1, 1 + len(lowerCAmelCase__ ) + 1 + len(lowerCAmelCase__ )) , )
def _a ( self ) -> Union[str, Any]:
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(lowerCAmelCase__ ) as context:
self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' )
self.assertTrue(
context.exception.args[0].startswith(
'The `backend_tokenizer` provided does not match the expected format.' ) )
@require_ftfy
def _a ( self ) -> Union[str, Any]:
super().test_tokenization_python_rust_equals()
def _a ( self ) -> List[str]:
# CLIP always lower cases letters
pass
| 709 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 ):
    __UpperCamelCase =right or len(SCREAMING_SNAKE_CASE__ ) - 1  # right=0 (the default) means "search to the end of the list"
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
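# Example calls (the function checks both ends on every step, so it recurses
# at most len(list) // 2 times):
#   search([1, 3, 5, 7, 9], 7)  # -> 3
#   search([1, 3, 5, 7, 9], 4)  # -> -1 (not found)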
| 682 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_A = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
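# The _LazyModule above defers the heavy imports until attribute access. A
# minimal standalone sketch of the same idea via PEP 562 module __getattr__
# (illustrative only; transformers' _LazyModule handles far more cases):
#   import importlib
#   _import_map = {'InstructBlipProcessor': '.processing_instructblip'}
#   def __getattr__(name):
#       if name in _import_map:
#           module = importlib.import_module(_import_map[name], __name__)
#           return getattr(module, name)
#       raise AttributeError(f'module {__name__!r} has no attribute {name!r}')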
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
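# Typical standalone usage of the processor exercised above (the image source
# is illustrative):
#   processor = LayoutLMvaImageProcessor(apply_ocr=False)
#   encoding = processor(image, return_tensors='pt')
#   # encoding.pixel_values has shape (1, 3, 224, 224); no words/boxes returned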
| 682 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase__ ( __UpperCAmelCase ):
"""simple docstring"""
UpperCAmelCase__ : int = (UniPCMultistepScheduler,)
UpperCAmelCase__ : Dict = (("""num_inference_steps""", 2_5),)
def _a ( self , **A_ ) -> Optional[Any]:
__UpperCamelCase ={
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'solver_order': 2,
'solver_type': 'bh2',
}
config.update(**UpperCAmelCase_ )
return config
def _a ( self , A_=0 , **A_ ) -> Dict:
__UpperCamelCase =dict(self.forward_default_kwargs )
__UpperCamelCase =kwargs.pop('num_inference_steps' , UpperCAmelCase_ )
__UpperCamelCase =self.dummy_sample
__UpperCamelCase =0.1 * sample
__UpperCamelCase =[residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__UpperCamelCase =self.get_scheduler_config(**UpperCAmelCase_ )
__UpperCamelCase =scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residuals
__UpperCamelCase =dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase_ )
__UpperCamelCase =scheduler_class.from_pretrained(UpperCAmelCase_ )
new_scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residuals
__UpperCamelCase =dummy_past_residuals[: new_scheduler.config.solver_order]
__UpperCamelCase , __UpperCamelCase =sample, sample
for t in range(UpperCAmelCase_ , time_step + scheduler.config.solver_order + 1 ):
__UpperCamelCase =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
__UpperCamelCase =new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _a ( self , A_=0 , **A_ ) -> Optional[int]:
__UpperCamelCase =dict(self.forward_default_kwargs )
__UpperCamelCase =kwargs.pop('num_inference_steps' , UpperCAmelCase_ )
__UpperCamelCase =self.dummy_sample
__UpperCamelCase =0.1 * sample
__UpperCamelCase =[residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__UpperCamelCase =self.get_scheduler_config()
__UpperCamelCase =scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
__UpperCamelCase =dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCAmelCase_ )
__UpperCamelCase =scheduler_class.from_pretrained(UpperCAmelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(UpperCAmelCase_ )
# copy over dummy past residual (must be after setting timesteps)
__UpperCamelCase =dummy_past_residuals[: new_scheduler.config.solver_order]
__UpperCamelCase =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
__UpperCamelCase =new_scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def _a ( self , A_=None , **A_ ) -> int:
if scheduler is None:
__UpperCamelCase =self.scheduler_classes[0]
__UpperCamelCase =self.get_scheduler_config(**UpperCAmelCase_ )
__UpperCamelCase =scheduler_class(**UpperCAmelCase_ )
__UpperCamelCase =self.scheduler_classes[0]
__UpperCamelCase =self.get_scheduler_config(**UpperCAmelCase_ )
__UpperCamelCase =scheduler_class(**UpperCAmelCase_ )
__UpperCamelCase =10
__UpperCamelCase =self.dummy_model()
__UpperCamelCase =self.dummy_sample_deter
scheduler.set_timesteps(UpperCAmelCase_ )
for i, t in enumerate(scheduler.timesteps ):
__UpperCamelCase =model(UpperCAmelCase_ , UpperCAmelCase_ )
__UpperCamelCase =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
return sample
def _a ( self ) -> str:
__UpperCamelCase =dict(self.forward_default_kwargs )
__UpperCamelCase =kwargs.pop('num_inference_steps' , UpperCAmelCase_ )
for scheduler_class in self.scheduler_classes:
__UpperCamelCase =self.get_scheduler_config()
__UpperCamelCase =scheduler_class(**UpperCAmelCase_ )
__UpperCamelCase =self.dummy_sample
__UpperCamelCase =0.1 * sample
if num_inference_steps is not None and hasattr(UpperCAmelCase_ , 'set_timesteps' ):
scheduler.set_timesteps(UpperCAmelCase_ )
elif num_inference_steps is not None and not hasattr(UpperCAmelCase_ , 'set_timesteps' ):
__UpperCamelCase =num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__UpperCamelCase =[residual + 0.2, residual + 0.15, residual + 0.10]
__UpperCamelCase =dummy_past_residuals[: scheduler.config.solver_order]
__UpperCamelCase =scheduler.timesteps[5]
__UpperCamelCase =scheduler.timesteps[6]
__UpperCamelCase =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
__UpperCamelCase =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _a ( self ) -> List[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__UpperCamelCase =UniPCMultistepScheduler(**self.get_scheduler_config() )
__UpperCamelCase =self.full_loop(scheduler=UpperCAmelCase_ )
__UpperCamelCase =torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
__UpperCamelCase =DPMSolverSinglestepScheduler.from_config(scheduler.config )
__UpperCamelCase =DEISMultistepScheduler.from_config(scheduler.config )
__UpperCamelCase =DPMSolverMultistepScheduler.from_config(scheduler.config )
__UpperCamelCase =UniPCMultistepScheduler.from_config(scheduler.config )
__UpperCamelCase =self.full_loop(scheduler=UpperCAmelCase_ )
__UpperCamelCase =torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def _a ( self ) -> List[str]:
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def _a ( self ) -> Union[str, Any]:
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["bh1", "bh2"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , solver_order=order , solver_type=solver_type , )
    def _a ( self ) -> Optional[int]:
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def _a ( self ) -> Tuple:
        for solver_type in ["bh1", "bh2"]:
            for order in [1, 2, 3]:
                for prediction_type in ["epsilon", "sample"]:
                    self.check_over_configs(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    sample =self.full_loop(
                        solver_order=order , solver_type=solver_type , prediction_type=prediction_type , )
                    assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def _a ( self ) -> Optional[Any]:
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def _a ( self ) -> Any:
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
def _a ( self ) -> int:
__UpperCamelCase =self.full_loop()
__UpperCamelCase =torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.full_loop(prediction_type='v_prediction' )
__UpperCamelCase =torch.mean(torch.abs(UpperCAmelCase_ ) )
assert abs(result_mean.item() - 0.1014 ) < 1E-3
def _a ( self ) -> Dict:
__UpperCamelCase =self.scheduler_classes[0]
        __UpperCamelCase =self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
__UpperCamelCase =scheduler_class(**UpperCAmelCase_ )
__UpperCamelCase =10
__UpperCamelCase =self.dummy_model()
__UpperCamelCase =self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCAmelCase_ )
for i, t in enumerate(scheduler.timesteps ):
__UpperCamelCase =model(UpperCAmelCase_ , UpperCAmelCase_ )
__UpperCamelCase =scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ).prev_sample
        assert sample.dtype == torch.float16  # half-precision inputs must stay half-precision through the solver
def _a ( self , **A_ ) -> str:
for scheduler_class in self.scheduler_classes:
__UpperCamelCase =self.get_scheduler_config(**UpperCAmelCase_ )
__UpperCamelCase =scheduler_class(**UpperCAmelCase_ )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
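# Illustrative sketch, not part of the test class above: the cross-scheduler switch the
# full-loop test exercises can be reproduced directly. Class names follow the public
# diffusers API; the config values here are arbitrary examples.
from diffusers import DEISMultistepScheduler, UniPCMultistepScheduler

unipc = UniPCMultistepScheduler(num_train_timesteps=1000, solver_order=2, solver_type="bh2")
deis = DEISMultistepScheduler.from_config(unipc.config)       # rebuild a sibling scheduler from the same config
roundtrip = UniPCMultistepScheduler.from_config(deis.config)  # and back again
assert roundtrip.config.solver_order == unipc.config.solver_order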
| 711 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_A = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
    sortish_sampler : bool = field(default=False , metadata={"help": "Whether to use SortishSampler or not."} )
    predict_with_generate : bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    generation_max_length : Optional[int] = field(
        default=None , metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        } , )
    generation_num_beams : Optional[int] = field(
        default=None , metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        } , )
    generation_config : Optional[Union[str, Path, GenerationConfig]] = field(
        default=None , metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        } , )
def _a ( self ) -> Dict:
        d =super().to_dict()
        for k, v in d.items():
            if isinstance(v , GenerationConfig ):
                d[k] =v.to_dict()
return d
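# Hedged usage sketch for the dataclass above (Seq2SeqTrainingArguments upstream; the
# class name is obfuscated to `UpperCAmelCase__` in this copy). `output_dir` is a
# hypothetical path.
args = UpperCAmelCase__(
    output_dir="out",
    predict_with_generate=True,
    generation_max_length=128,
    generation_num_beams=4,
)
assert args.to_dict()["generation_num_beams"] == 4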
| 682 | 0 |
import re
import string
import numpy as np
import datasets
_A = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
_A = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
_A = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def _a ( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , reference_urls=[] , )
    def _a ( self , predictions , references , regexes_to_ignore=None , ignore_case=False , ignore_punctuation=False , ignore_numbers=False , ) -> Tuple:
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions =np.array([re.sub(s , '' , x ) for x in predictions] )
                references =np.array([re.sub(s , '' , x ) for x in references] )
        else:
            predictions =np.asarray(predictions )
            references =np.asarray(references )
        if ignore_case:
            predictions =np.char.lower(predictions )
            references =np.char.lower(references )
        if ignore_punctuation:
            repl_table =string.punctuation.maketrans('' , '' , string.punctuation )
            predictions =np.char.translate(predictions , table=repl_table )
            references =np.char.translate(references , table=repl_table )
        if ignore_numbers:
            repl_table =string.digits.maketrans('' , '' , string.digits )
            predictions =np.char.translate(predictions , table=repl_table )
            references =np.char.translate(references , table=repl_table )
        score_list =predictions == references
        return {"exact_match": np.mean(score_list ) * 100}
| 712 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
    def __init__( self , vocab_size=30524 , hidden_size=768 , encoder_hidden_size=768 , intermediate_size=3072 , projection_dim=768 , num_hidden_layers=12 , num_attention_heads=8 , max_position_embeddings=512 , hidden_act="gelu" , layer_norm_eps=1E-12 , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , bos_token_id=30522 , eos_token_id=2 , pad_token_id=0 , sep_token_id=102 , is_decoder=True , use_cache=True , **kwargs , ) -> Optional[int]:
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , sep_token_id=sep_token_id , **kwargs , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
            config_dict =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
            config_dict =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
        self.text_config =BlipTextConfig(**text_config )
        self.vision_config =BlipVisionConfig(**vision_config )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
    def _a ( cls , text_config , vision_config , **kwargs ) -> str:
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
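# Hedged composition sketch. Upstream the classmethod collapsed to `_a` above is named
# `from_text_vision_configs`, and all three classes share the obfuscated name
# `UpperCAmelCase__` in this copy; `BlipConfig` and the assertion below follow the
# transformers originals rather than this file.
text_cfg = BlipTextConfig(vocab_size=30524 )
vision_cfg = BlipVisionConfig(image_size=384 )
blip_cfg = BlipConfig.from_text_vision_configs(text_cfg , vision_cfg )
assert blip_cfg.to_dict()['text_config']['vocab_size'] == 30524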
| 682 | 0 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> List[Any]:
super().tearDown()
gc.collect()
def _a ( self ) -> List[str]:
__UpperCamelCase , __UpperCamelCase =FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' , revision='bf16' , dtype=jnp.bfloataa , )
__UpperCamelCase ='A painting of a squirrel eating a burger'
__UpperCamelCase =jax.device_count()
__UpperCamelCase =num_samples * [prompt]
__UpperCamelCase =sd_pipe.prepare_inputs(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =replicate(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =shard(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =jax.random.PRNGKey(0 )
__UpperCamelCase =jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
__UpperCamelCase =sd_pipe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_inference_steps=25 , jit=_SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__UpperCamelCase =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__UpperCamelCase =images[0, 253:256, 253:256, -1]
__UpperCamelCase =jnp.asarray(jax.device_get(image_slice.flatten() ) )
__UpperCamelCase =jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.4_5508, 0.4512] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def _a ( self ) -> Any:
__UpperCamelCase ='stabilityai/stable-diffusion-2'
__UpperCamelCase , __UpperCamelCase =FlaxDPMSolverMultistepScheduler.from_pretrained(_SCREAMING_SNAKE_CASE , subfolder='scheduler' )
__UpperCamelCase , __UpperCamelCase =FlaxStableDiffusionPipeline.from_pretrained(
_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , revision='bf16' , dtype=jnp.bfloataa , )
__UpperCamelCase =scheduler_params
__UpperCamelCase ='A painting of a squirrel eating a burger'
__UpperCamelCase =jax.device_count()
__UpperCamelCase =num_samples * [prompt]
__UpperCamelCase =sd_pipe.prepare_inputs(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =replicate(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =shard(_SCREAMING_SNAKE_CASE )
__UpperCamelCase =jax.random.PRNGKey(0 )
__UpperCamelCase =jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() )
__UpperCamelCase =sd_pipe(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , num_inference_steps=25 , jit=_SCREAMING_SNAKE_CASE )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
__UpperCamelCase =images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
__UpperCamelCase =images[0, 253:256, 253:256, -1]
__UpperCamelCase =jnp.asarray(jax.device_get(image_slice.flatten() ) )
__UpperCamelCase =jnp.array([0.4336, 0.4_2969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
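# Generic sketch of the replicate/shard data-parallel pattern both tests above use,
# factored into a helper (the function name is ours); assumes an already-loaded
# `pipeline, params` pair.
def run_data_parallel(pipeline , params , prompt , steps=25 ):
    n = jax.device_count()
    prompt_ids = pipeline.prepare_inputs([prompt] * n )   # one prompt per device
    params = replicate(params )                           # copy the weights to every device
    prompt_ids = shard(prompt_ids )                       # split the batch across devices
    rng = jax.random.split(jax.random.PRNGKey(0 ) , n )   # independent RNG key per device
    return pipeline(prompt_ids , params , rng , num_inference_steps=steps , jit=True )[0]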
| 713 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
                sentence =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens =tokenizer_r.encode_plus(
                    sentence , return_attention_mask=False , return_token_type_ids=False , return_offsets_mapping=True , add_special_tokens=True , )
                do_lower_case =tokenizer_r.do_lower_case if hasattr(tokenizer_r , 'do_lower_case' ) else False
                expected_results =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
        text =tokenizer.encode('你好' , add_special_tokens=False )
        text_a =tokenizer.encode('你是谁' , add_special_tokens=False )
        encoded_sentence =tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair =tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
        __UpperCamelCase =self.get_tokenizers(do_lower_case=False )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
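# Hedged end-to-end sketch of the triple-id interface the tests above exercise: RoCBert
# pairs every token id with a glyph ("shape") id and a pinyin ("pronunciation") id. The
# three vocabulary file paths stand for the files written in `setUp`.
tokenizer = RoCBertTokenizer(vocab_file , word_shape_file , word_pronunciation_file )
tokens = tokenizer.tokenize('你好' )
ids = tokenizer.convert_tokens_to_ids(tokens )
shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens )
pron_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens )
assert len(ids ) == len(shape_ids ) == len(pron_ids )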
| 682 | 0 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@property
def _a ( self ) -> str:
torch.manual_seed(0 )
__UpperCamelCase =UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def _a ( self ) -> Dict:
__UpperCamelCase =self.dummy_uncond_unet
__UpperCamelCase =ScoreSdeVeScheduler()
__UpperCamelCase =ScoreSdeVePipeline(unet=A_ , scheduler=A_ )
sde_ve.to(A_ )
sde_ve.set_progress_bar_config(disable=A_ )
__UpperCamelCase =torch.manual_seed(0 )
__UpperCamelCase =sde_ve(num_inference_steps=2 , output_type='numpy' , generator=A_ ).images
__UpperCamelCase =torch.manual_seed(0 )
__UpperCamelCase =sde_ve(num_inference_steps=2 , output_type='numpy' , generator=A_ , return_dict=A_ )[
0
]
__UpperCamelCase =image[0, -3:, -3:, -1]
__UpperCamelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__UpperCamelCase =np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ="""google/ncsnpp-church-256"""
__UpperCamelCase =UNetaDModel.from_pretrained(A_ )
__UpperCamelCase =ScoreSdeVeScheduler.from_pretrained(A_ )
__UpperCamelCase =ScoreSdeVePipeline(unet=A_ , scheduler=A_ )
sde_ve.to(A_ )
sde_ve.set_progress_bar_config(disable=A_ )
__UpperCamelCase =torch.manual_seed(0 )
__UpperCamelCase =sde_ve(num_inference_steps=10 , output_type='numpy' , generator=A_ ).images
__UpperCamelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__UpperCamelCase =np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
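# Stand-alone inference sketch mirroring the slow test above (same checkpoint id; running
# it downloads weights). `UNetaDModel` follows the import spelling used in this file
# (`UNet2DModel` upstream).
unet = UNetaDModel.from_pretrained('google/ncsnpp-church-256' )
scheduler = ScoreSdeVeScheduler.from_pretrained('google/ncsnpp-church-256' )
pipe = ScoreSdeVePipeline(unet=unet , scheduler=scheduler )
image = pipe(num_inference_steps=10 , output_type='numpy' , generator=torch.manual_seed(0 ) ).images[0]
assert image.shape == (256, 256, 3)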
| 714 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_A = random.Random()
def _UpperCAmelCase ( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng =global_rng
    values =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
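# e.g. _UpperCAmelCase((2, 3)) -> a 2x3 nested list of uniform floats in [0, scale)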
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=10 , hop_length=160 , chunk_length=8 , padding_value=0.0 , sampling_rate=4000 , return_attention_mask=False , do_normalize=True , ) -> Optional[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =min_seq_length
__UpperCamelCase =max_seq_length
__UpperCamelCase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase =padding_value
__UpperCamelCase =sampling_rate
__UpperCamelCase =return_attention_mask
__UpperCamelCase =do_normalize
__UpperCamelCase =feature_size
__UpperCamelCase =chunk_length
__UpperCamelCase =hop_length
def _a ( self ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def _a ( self , equal_length=False , numpify=False ) -> Any:
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase =[np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self ) -> Optional[int]:
__UpperCamelCase =WhisperFeatureExtractionTester(self )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase =self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =os.path.join(A_ , 'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase =self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCamelCase =feature_extractor(A_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCamelCase =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCamelCase =[floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCamelCase =np.asarray(A_ )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
__UpperCamelCase =[x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def _a ( self ) -> Dict:
import torch
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs =np.random.rand(100 , 32 ).astype(np.float64 )
__UpperCamelCase =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _a ( self , num_samples ) -> Optional[int]:
        ds =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        # automatic decoding with librispeech
        speech_samples =ds.sort('id' ).select(range(num_samples ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase =torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__UpperCamelCase =self._load_datasamples(1 )
__UpperCamelCase =WhisperFeatureExtractor()
__UpperCamelCase =feature_extractor(A_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1E-4 ) )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =self._load_datasamples(1 )[0]
__UpperCamelCase =((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
__UpperCamelCase =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1E-3 ) )
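# Usage sketch for the extractor under test: 16 kHz audio in, padded (80, 3000) log-mel
# features out. The random waveform merely stands in for real speech.
fe = WhisperFeatureExtractor()
waveform = np.random.randn(16000 ).astype(np.float32 )             # one second of fake audio
features = fe(waveform , sampling_rate=16000 , return_tensors='np' ).input_features
assert features.shape == (1, 80, 3000)                             # padded to 30 s worth of frames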
| 682 | 0 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_ , A_=0.2 , A_=0.2 ) -> Union[str, Any]:
__UpperCamelCase =bp_numa
__UpperCamelCase =bp_numa
__UpperCamelCase =bp_numa
__UpperCamelCase =conva_get[:2]
__UpperCamelCase =conva_get[2]
__UpperCamelCase =size_pa
__UpperCamelCase =rate_w
__UpperCamelCase =rate_t
__UpperCamelCase =[
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
__UpperCamelCase =np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__UpperCamelCase =np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
__UpperCamelCase =-2 * np.random.rand(self.conva[1] ) + 1
__UpperCamelCase =-2 * np.random.rand(self.num_bpa ) + 1
__UpperCamelCase =-2 * np.random.rand(self.num_bpa ) + 1
def _a ( self , A_ ) -> Union[str, Any]:
# save model dict with pickle
__UpperCamelCase ={
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(__lowercase , 'wb' ) as f:
pickle.dump(__lowercase , __lowercase )
print(f'Model saved: {save_path}' )
@classmethod
def _a ( cls , A_ ) -> Optional[Any]:
# read saved model
with open(__lowercase , 'rb' ) as f:
__UpperCamelCase =pickle.load(__lowercase ) # noqa: S301
__UpperCamelCase =model_dic.get('conv1' )
conv_get.append(model_dic.get('step_conv1' ) )
__UpperCamelCase =model_dic.get('size_pooling1' )
__UpperCamelCase =model_dic.get('num_bp1' )
__UpperCamelCase =model_dic.get('num_bp2' )
__UpperCamelCase =model_dic.get('num_bp3' )
__UpperCamelCase =model_dic.get('rate_weight' )
__UpperCamelCase =model_dic.get('rate_thre' )
# create model instance
__UpperCamelCase =CNN(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
# modify model parameter
__UpperCamelCase =model_dic.get('w_conv1' )
__UpperCamelCase =model_dic.get('wkj' )
__UpperCamelCase =model_dic.get('vji' )
__UpperCamelCase =model_dic.get('thre_conv1' )
__UpperCamelCase =model_dic.get('thre_bp2' )
__UpperCamelCase =model_dic.get('thre_bp3' )
return conv_ins
def _a ( self , A_ ) -> Any:
return 1 / (1 + np.exp(-1 * x ))
def _a ( self , A_ ) -> Dict:
return round(__lowercase , 3 )
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Optional[int]:
# convolution process
__UpperCamelCase =convs[0]
__UpperCamelCase =convs[1]
__UpperCamelCase =np.shape(__lowercase )[0]
# get the data slice of original image data, data_focus
__UpperCamelCase =[]
for i_focus in range(0 , size_data - size_conv + 1 , __lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , __lowercase ):
__UpperCamelCase =data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(__lowercase )
# calculate the feature map of every single kernel, and saved as list of matrix
__UpperCamelCase =[]
__UpperCamelCase =int((size_data - size_conv) / conv_step + 1 )
for i_map in range(__lowercase ):
__UpperCamelCase =[]
for i_focus in range(len(__lowercase ) ):
__UpperCamelCase =(
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(__lowercase ) )
__UpperCamelCase =np.asmatrix(__lowercase ).reshape(
__lowercase , __lowercase )
data_featuremap.append(__lowercase )
# expanding the data slice to One dimenssion
__UpperCamelCase =[]
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(__lowercase ) )
__UpperCamelCase =np.asarray(__lowercase )
return focus_list, data_featuremap
def _a ( self , A_ , A_ , A_="average_pool" ) -> List[Any]:
# pooling process
__UpperCamelCase =len(featuremaps[0] )
__UpperCamelCase =int(size_map / size_pooling )
__UpperCamelCase =[]
for i_map in range(len(__lowercase ) ):
__UpperCamelCase =featuremaps[i_map]
__UpperCamelCase =[]
for i_focus in range(0 , __lowercase , __lowercase ):
for j_focus in range(0 , __lowercase , __lowercase ):
__UpperCamelCase =feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(__lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(__lowercase ) )
__UpperCamelCase =np.asmatrix(__lowercase ).reshape(__lowercase , __lowercase )
featuremap_pooled.append(__lowercase )
return featuremap_pooled
def _a ( self , A_ ) -> int:
# expanding three dimension data to one dimension list
__UpperCamelCase =[]
for i in range(len(__lowercase ) ):
__UpperCamelCase =np.shape(data[i] )
__UpperCamelCase =data[i].reshape(1 , shapes[0] * shapes[1] )
__UpperCamelCase =data_listed.getA().tolist()[0]
data_expanded.extend(__lowercase )
__UpperCamelCase =np.asarray(__lowercase )
return data_expanded
def _a ( self , A_ ) -> str:
# expanding matrix to one dimension list
__UpperCamelCase =np.asarray(__lowercase )
__UpperCamelCase =np.shape(__lowercase )
__UpperCamelCase =data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
__UpperCamelCase =[]
__UpperCamelCase =0
for i_map in range(__lowercase ):
__UpperCamelCase =np.ones((size_map, size_map) )
for i in range(0 , __lowercase , __lowercase ):
for j in range(0 , __lowercase , __lowercase ):
__UpperCamelCase =pd_pool[
i_pool
]
__UpperCamelCase =i_pool + 1
__UpperCamelCase =np.multiply(
__lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(__lowercase )
return pd_all
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_=bool ) -> str:
# model traning
print('----------------------Start Training-------------------------' )
print((' - - Shape: Train_Data ', np.shape(__lowercase )) )
print((' - - Shape: Teach_Data ', np.shape(__lowercase )) )
__UpperCamelCase =0
__UpperCamelCase =[]
__UpperCamelCase =10000
while rp < n_repeat and mse >= error_accuracy:
__UpperCamelCase =0
print(f'-------------Learning Time {rp}--------------' )
for p in range(len(__lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
__UpperCamelCase =np.asmatrix(datas_train[p] )
__UpperCamelCase =np.asarray(datas_teach[p] )
__UpperCamelCase , __UpperCamelCase =self.convolute(
__lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__UpperCamelCase =self.pooling(__lowercase , self.size_poolinga )
__UpperCamelCase =np.shape(__lowercase )
__UpperCamelCase =self._expand(__lowercase )
__UpperCamelCase =data_bp_input
__UpperCamelCase =np.dot(__lowercase , self.vji.T ) - self.thre_bpa
__UpperCamelCase =self.sig(__lowercase )
__UpperCamelCase =np.dot(__lowercase , self.wkj.T ) - self.thre_bpa
__UpperCamelCase =self.sig(__lowercase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
__UpperCamelCase =np.multiply(
(data_teach - bp_outa) , np.multiply(__lowercase , (1 - bp_outa) ) )
__UpperCamelCase =np.multiply(
np.dot(__lowercase , self.wkj ) , np.multiply(__lowercase , (1 - bp_outa) ) )
__UpperCamelCase =np.dot(__lowercase , self.vji )
__UpperCamelCase =pd_i_all / (self.size_poolinga * self.size_poolinga)
__UpperCamelCase =pd_conva_pooled.T.getA().tolist()
__UpperCamelCase =self._calculate_gradient_from_pool(
__lowercase , __lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
__UpperCamelCase =self._expand_mat(pd_conva_all[k_conv] )
__UpperCamelCase =self.rate_weight * np.dot(__lowercase , __lowercase )
__UpperCamelCase =self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
__UpperCamelCase =(
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
__UpperCamelCase =self.wkj + pd_k_all.T * bp_outa * self.rate_weight
__UpperCamelCase =self.vji + pd_j_all.T * bp_outa * self.rate_weight
__UpperCamelCase =self.thre_bpa - pd_k_all * self.rate_thre
__UpperCamelCase =self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
__UpperCamelCase =np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
__UpperCamelCase =rp + 1
__UpperCamelCase =error_count / patterns
all_mse.append(__lowercase )
def draw_error():
__UpperCamelCase =[error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(__lowercase , '+-' )
plt.plot(__lowercase , 'r--' )
plt.xlabel('Learning Times' )
plt.ylabel('All_mse' )
plt.grid(__lowercase , alpha=0.5 )
plt.show()
print('------------------Training Complished---------------------' )
print((' - - Training epoch: ', rp, f' - - Mse: {mse:.6f}') )
if draw_e:
draw_error()
return mse
def _a ( self , A_ ) -> List[str]:
# model predict
__UpperCamelCase =[]
print('-------------------Start Testing-------------------------' )
print((' - - Shape: Test_Data ', np.shape(__lowercase )) )
for p in range(len(__lowercase ) ):
__UpperCamelCase =np.asmatrix(datas_test[p] )
__UpperCamelCase , __UpperCamelCase =self.convolute(
__lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__UpperCamelCase =self.pooling(__lowercase , self.size_poolinga )
__UpperCamelCase =self._expand(__lowercase )
__UpperCamelCase =data_bp_input
__UpperCamelCase =bp_outa * self.vji.T - self.thre_bpa
__UpperCamelCase =self.sig(__lowercase )
__UpperCamelCase =bp_outa * self.wkj.T - self.thre_bpa
__UpperCamelCase =self.sig(__lowercase )
produce_out.extend(bp_outa.getA().tolist() )
__UpperCamelCase =[list(map(self.do_round , __lowercase ) ) for each in produce_out]
return np.asarray(__lowercase )
def _a ( self , A_ ) -> Optional[int]:
# return the data of image after convoluting process so we can check it out
__UpperCamelCase =np.asmatrix(__lowercase )
__UpperCamelCase , __UpperCamelCase =self.convolute(
__lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
__UpperCamelCase =self.pooling(__lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
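# Hedged driver sketch against the upstream (de-obfuscated) version of this class, where
# the constructor order is (conv1_get, size_p1, bp_num1, bp_num2, bp_num3) and the two
# persistence methods collapsed to `_a` above are `save_model` / `load_model`. Kept as a
# comment because the obfuscated copy cannot execute it as-is.
# 50 = 2 kernels * (((12 - 3) / 1 + 1) / 2)**2 flattened pooled inputs.
#
# net = CNN([3, 2, 1], 2, 50, 20, 10)
# net.save_model('model.pkl')
# restored = CNN.load_model('model.pkl')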
| 715 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) =config_and_inputs
__UpperCamelCase ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self ) -> Dict:
__UpperCamelCase =TFFlaubertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
        __UpperCamelCase =tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
        __UpperCamelCase =tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
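# To execute the @slow integration test above, use transformers' standard opt-in
# flag (the test-file path below is an assumption):
# RUN_SLOW=1 python -m pytest tests/models/flaubert/test_modeling_tf_flaubert.py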
| 682 | 0 |
'''simple docstring'''
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
_A = logging.get_logger(__name__)
class UpperCAmelCase__ ( __snake_case ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = 'AutoTokenizer'
UpperCAmelCase__ : Tuple = ['tokenizer']
UpperCAmelCase__ : int = {
'semantic_prompt': 1,
'coarse_prompt': 2,
'fine_prompt': 2,
}
def __init__( self , A_ , A_=None ) -> Dict:
super().__init__(A_ )
__UpperCamelCase =speaker_embeddings
@classmethod
def _a ( cls , A_ , A_="speaker_embeddings_path.json" , **A_ ) -> Union[str, Any]:
if speaker_embeddings_dict_path is not None:
__UpperCamelCase =get_file_from_repo(
A_ , A_ , subfolder=kwargs.pop('subfolder' , A_ ) , cache_dir=kwargs.pop('cache_dir' , A_ ) , force_download=kwargs.pop('force_download' , A_ ) , proxies=kwargs.pop('proxies' , A_ ) , resume_download=kwargs.pop('resume_download' , A_ ) , local_files_only=kwargs.pop('local_files_only' , A_ ) , use_auth_token=kwargs.pop('use_auth_token' , A_ ) , revision=kwargs.pop('revision' , A_ ) , )
if speaker_embeddings_path is None:
logger.warning(
                f'`{os.path.join(A_ , A_ )}` does not exist, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json dictionary if wanted, otherwise set `speaker_embeddings_dict_path=None`.' )
__UpperCamelCase =None
else:
with open(A_ ) as speaker_embeddings_json:
__UpperCamelCase =json.load(A_ )
else:
__UpperCamelCase =None
__UpperCamelCase =AutoTokenizer.from_pretrained(A_ , **A_ )
return cls(tokenizer=A_ , speaker_embeddings=A_ )
def _a ( self , A_ , A_="speaker_embeddings_path.json" , A_="speaker_embeddings" , A_ = False , **A_ , ) -> Tuple:
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(A_ , A_ , 'v2' ) , exist_ok=A_ )
__UpperCamelCase ={}
__UpperCamelCase =save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
__UpperCamelCase =self._load_voice_preset(A_ )
__UpperCamelCase ={}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict['repo_or_path'] , A_ , f'{prompt_key}_{key}' ) , voice_preset[key] , allow_pickle=A_ , )
__UpperCamelCase =os.path.join(A_ , f'{prompt_key}_{key}.npy' )
__UpperCamelCase =tmp_dict
with open(os.path.join(A_ , A_ ) , 'w' ) as fp:
json.dump(A_ , A_ )
super().save_pretrained(A_ , A_ , **A_ )
def _a ( self , A_ = None , **A_ ) -> List[Any]:
__UpperCamelCase =self.speaker_embeddings[voice_preset]
__UpperCamelCase ={}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
f'Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].' )
__UpperCamelCase =get_file_from_repo(
self.speaker_embeddings.get('repo_or_path' , '/' ) , voice_preset_paths[key] , subfolder=kwargs.pop('subfolder' , A_ ) , cache_dir=kwargs.pop('cache_dir' , A_ ) , force_download=kwargs.pop('force_download' , A_ ) , proxies=kwargs.pop('proxies' , A_ ) , resume_download=kwargs.pop('resume_download' , A_ ) , local_files_only=kwargs.pop('local_files_only' , A_ ) , use_auth_token=kwargs.pop('use_auth_token' , A_ ) , revision=kwargs.pop('revision' , A_ ) , )
if path is None:
raise ValueError(
                    f'`{os.path.join(self.speaker_embeddings.get("repo_or_path" , "/" ) , voice_preset_paths[key] )}` does not exist, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset} embeddings.' )
__UpperCamelCase =np.load(A_ )
return voice_preset_dict
def _a ( self , A_ = None ) -> Optional[int]:
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(f'Voice preset unrecognized, missing {key} as a key.' )
if not isinstance(voice_preset[key] , np.ndarray ):
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(f'{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.' )
def __call__( self , A_=None , A_=None , A_="pt" , A_=256 , A_=False , A_=True , A_=False , **A_ , ) -> List[Any]:
if voice_preset is not None and not isinstance(A_ , A_ ):
if (
isinstance(A_ , A_ )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
__UpperCamelCase =self._load_voice_preset(A_ )
else:
if isinstance(A_ , A_ ) and not voice_preset.endswith('.npz' ):
__UpperCamelCase =voice_preset + ".npz"
__UpperCamelCase =np.load(A_ )
if voice_preset is not None:
self._validate_voice_preset_dict(A_ , **A_ )
__UpperCamelCase =BatchFeature(data=A_ , tensor_type=A_ )
__UpperCamelCase =self.tokenizer(
A_ , return_tensors=A_ , padding='max_length' , max_length=A_ , return_attention_mask=A_ , return_token_type_ids=A_ , add_special_tokens=A_ , **A_ , )
if voice_preset is not None:
__UpperCamelCase =voice_preset
return encoded_text
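# Illustrative usage sketch for the processor above. It mirrors Bark's processor;
# the classmethod is anonymized to `_a` here (upstream it is `from_pretrained`),
# and the checkpoint id and voice-preset name below are assumptions:
# processor = BarkProcessor.from_pretrained('suno/bark-small')
# inputs = processor('Hello, my dog is cute', voice_preset='v2/en_speaker_6')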
| 716 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def _UpperCAmelCase ( file , sock ):
    # ===== initialization =====
    # @patch decorators apply bottom-up: `file` mocks builtins.open, `sock` mocks socket.socket
    conn = Mock()
    sock.return_value.accept.return_value = (conn, Mock())
    chunks = iter([1, None] )  # one data chunk, then None ends the send loop
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(chunks )
    # ===== invoke =====
    send_file(filename='mytext.txt' , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
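# The mocked flow above corresponds to the server side of send_file:
# bind -> listen -> accept -> recv (client handshake) -> read/send file chunks
# until read() returns a falsy value -> close the connection and shut the socket down.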
| 682 | 0 |
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCamelCase =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__UpperCamelCase =features.copy() if features else default_expected_features
__UpperCamelCase =(
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
__UpperCamelCase =features.copy() if features else default_expected_features
__UpperCamelCase =(
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ={'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
__UpperCamelCase =features.copy()
__UpperCamelCase =(
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__UpperCamelCase =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    assert dataset.split == (split if split else 'train')
@pytest.mark.parametrize('path_type' , [str, list] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
if issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =jsonl_path
elif issubclass(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =[jsonl_path]
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__UpperCamelCase =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_json_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str=("train",) ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
for split in splits:
__UpperCamelCase =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCamelCase =JsonDatasetReader({'train': jsonl_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__UpperCamelCase =features.copy() if features else default_expected_features
__UpperCamelCase =(
Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase =JsonDatasetReader({'train': jsonl_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int ):
if split:
__UpperCamelCase ={split: jsonl_path}
else:
__UpperCamelCase ='train'
__UpperCamelCase ={'train': jsonl_path, 'test': jsonl_path}
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__UpperCamelCase =JsonDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_json_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
return json.load(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple ):
return [json.loads(SCREAMING_SNAKE_CASE__ ) for line in buffer]
class UpperCAmelCase__ :
"""simple docstring"""
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def _a ( self , A_ , A_ , A_ ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(A_ , A_ , lines=A_ ).write()
buffer.seek(0 )
__UpperCamelCase =load_json_function(A_ )
assert isinstance(A_ , A_ )
assert isinstance(exported_content[0] , A_ )
assert len(A_ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> int:
with io.BytesIO() as buffer:
JsonDatasetWriter(A_ , A_ , lines=A_ , orient=A_ ).write()
buffer.seek(0 )
__UpperCamelCase =load_json(A_ )
assert isinstance(A_ , A_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(A_ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(A_ ) == 10
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def _a ( self , A_ , A_ , A_ ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(A_ , A_ , lines=A_ , num_proc=2 ).write()
buffer.seek(0 )
__UpperCamelCase =load_json_function(A_ )
assert isinstance(A_ , A_ )
assert isinstance(exported_content[0] , A_ )
assert len(A_ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(A_ , A_ , lines=A_ , orient=A_ , num_proc=2 ).write()
buffer.seek(0 )
__UpperCamelCase =load_json(A_ )
assert isinstance(A_ , A_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(A_ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(A_ ) == 10
def _a ( self , A_ ) -> Any:
with pytest.raises(A_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(A_ , A_ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / f'test.json.{extension}'
__UpperCamelCase =str(shared_datadir / f'test_file.json.{extension}' )
JsonDatasetWriter(A_ , A_ , compression=A_ ).write()
with fsspec.open(A_ , 'rb' , compression='infer' ) as f:
__UpperCamelCase =f.read()
with fsspec.open(A_ , 'rb' , compression='infer' ) as f:
__UpperCamelCase =f.read()
assert exported_content == original_content
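# Minimal round-trip sketch for the reader/writer pair exercised above
# (the in-memory dataset and file name are illustrative):
# dset = Dataset.from_dict({'col_1': ['a'], 'col_2': [1], 'col_3': [1.0]})
# JsonDatasetWriter(dset, 'out.jsonl', lines=True).write()
# reloaded = JsonDatasetReader('out.jsonl').read()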
| 717 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Secant method: find a root of `function` from two starting guesses."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n) == function(x_n1):
            raise ZeroDivisionError('float division by zero, could not find root' )
        # next iterate: where the chord through (x_n, f(x_n)) and (x_n1, f(x_n1)) crosses y = 0
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
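# Sanity check: the real root of the Wallis cubic x^3 - 2x - 5 is approximately
# 2.0945515, so the call above should print a value near that (reference note,
# not part of the original module):
# assert abs(intersection(f, 3, 3.5) - 2.0945515) < 1e-4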
| 682 | 0 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=12 , A_=7 , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=32 , A_=2 , A_=4 , A_=37 , A_=0.1 , A_=0.1 , A_=512 , A_=0.02 , A_=0 , A_=None , ) -> List[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_input_mask
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =dropout
__UpperCamelCase =attention_dropout
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =bos_token_id
def _a ( self ) -> Dict:
        input_ids =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask =None
        if self.use_input_mask:
            input_mask =random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            input_mask =input_mask.numpy()
            batch_size , seq_length =input_mask.shape
            start_indices =np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(start_indices ):
                input_mask[batch_idx, :start_index] =1
                input_mask[batch_idx, start_index:] =0
        config =self.get_config()
        return config, input_ids, tf.convert_to_tensor(input_mask )
def _a ( self ) -> List[str]:
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _a ( self , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =TFBlipTextModel(config=lowerCamelCase__ )
__UpperCamelCase =model(lowerCamelCase__ , attention_mask=lowerCamelCase__ , training=lowerCamelCase__ )
__UpperCamelCase =model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self ) -> Tuple:
        config_and_inputs =self.prepare_config_and_inputs()
        config , input_ids , input_mask =config_and_inputs
        inputs_dict ={"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = (TFBlipTextModel,) if is_tf_available() else ()
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def _a ( self ) -> List[str]:
__UpperCamelCase =BlipTextModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=lowerCamelCase__ , hidden_size=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Tuple:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def _a ( self ) -> Optional[int]:
pass
def _a ( self ) -> Dict:
pass
@unittest.skip(reason='Blip does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self ) -> Any:
pass
@unittest.skip(reason='BlipTextModel has no base class and is not available in MODEL_MAPPING' )
def _a ( self ) -> Optional[int]:
pass
@slow
def _a ( self ) -> Optional[Any]:
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFBlipTextModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def _a ( self , A_=True ) -> Dict:
super().test_pt_tf_model_equivalence(allow_missing_keys=lowerCamelCase__ )
| 718 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_A = logging.getLogger(__name__)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> int:
__UpperCamelCase =False
def _a ( self , A_ , A_ , A_ , A_ ) -> List[Any]:
if not self.initialized:
__UpperCamelCase =RagRetriever(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =True
def _a ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def _a ( self , A_ , A_ ) -> Dict:
        doc_ids , retrieved_doc_embeds =self.retriever._main_retrieve(A_ , A_ )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_=None ) -> Dict:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def _a ( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self , A_ , A_ ) -> Optional[int]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
            random_worker =self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
            doc_ids , retrieved_doc_embeds =ray.get(random_worker.retrieve.remote(A_ , A_ ) )
        else:
            doc_ids , retrieved_doc_embeds =self._main_retrieve(A_ , A_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ )
@classmethod
def _a ( cls , A_ , A_=None , **A_ ) -> List[str]:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def _a ( cls , A_ , A_ , A_=None , **A_ ) -> str:
__UpperCamelCase =kwargs.pop('config' , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
__UpperCamelCase =RagTokenizer.from_pretrained(A_ , config=A_ )
__UpperCamelCase =rag_tokenizer.question_encoder
__UpperCamelCase =rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase ='custom'
__UpperCamelCase =CustomHFIndex(config.retrieval_vector_size , A_ )
else:
__UpperCamelCase =cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
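# Sketch of the intended distributed setup (actor count and checkpoint are
# assumptions; upstream the two classes above are named RayRetriever and
# RagRayDistributedRetriever):
# import ray
# ray.init()
# workers = [ray.remote(RayRetriever).remote() for _ in range(2)]
# retriever = RagRayDistributedRetriever.from_pretrained('facebook/rag-token-nq', workers)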
| 682 | 0 |
def _UpperCAmelCase ( hex_num : str ) -> int:
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError('No value was passed to the function' )
    is_negative = hex_num[0] == '-'
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num , 16 )
    except ValueError:
        raise ValueError('Invalid value was passed to the function' )
    if int_num == 0:  # edge case: '0' would otherwise fall through to int('') below
        return 0
    bin_str = ''
    while int_num > 0:
        bin_str = str(int_num % 2 ) + bin_str
        int_num >>= 1
    return int(('-' + bin_str) if is_negative else bin_str )
if __name__ == "__main__":
import doctest
doctest.testmod()
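# Illustrative results for the converter above (hand-checked, not original doctests):
# _UpperCAmelCase('AC')   -> 10101100   (0xAC == 172)
# _UpperCAmelCase('-0x1') -> -1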
| 719 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__UpperCamelCase =(self.image_size // 32) ** 2
__UpperCamelCase =num_patches + 1
def _a ( self ) -> str:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.type_sequence_label_size
__UpperCamelCase =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ) -> List[Any]:
        config_and_inputs =self.prepare_config_and_inputs()
        config , pixel_values , labels =config_and_inputs
        inputs_dict ={'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__UpperCamelCase =[f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' )
__UpperCamelCase =model(**A_ )
__UpperCamelCase =outputs.logits
# model predicts one of the 1000 ImageNet classes
__UpperCamelCase =logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
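# Equivalent top-1 prediction via the pipeline API (the checkpoint id is the one
# already used above; everything else is standard transformers usage):
# from transformers import pipeline
# classifier = pipeline('image-classification', model='google/vit-hybrid-base-bit-384')
# classifier(prepare_img())[0]['label']  # expected: 'tabby, tabby cat'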
| 682 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_A = logging.get_logger(__name__)
_A = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
_A = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_A = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_A = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
_A = OrderedDict(
[
        # Model for Image-classification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
_A = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
_A = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
_A = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
_A = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
_A = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
_A = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
_A = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
_A = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
_A = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
_A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = FLAX_MODEL_MAPPING
_A = auto_class_update(FlaxAutoModel)
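# Illustrative usage of the auto class defined above (the checkpoint name is an
# assumption; the mapping resolves the architecture from the config):
# model = FlaxAutoModel.from_pretrained('bert-base-cased')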
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_A = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_A = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_A = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_A = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : int = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_A = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Dict = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_A = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_A = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_A = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Dict = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_A = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_A = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Dict = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_A = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_A = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 720 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : LevitConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ):
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__UpperCamelCase =timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 1_92:
__UpperCamelCase =timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 2_56:
__UpperCamelCase =timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 3_84:
__UpperCamelCase =timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE__ )
from_model.eval()
__UpperCamelCase =LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
__UpperCamelCase =OrderedDict()
__UpperCamelCase =from_model.state_dict()
__UpperCamelCase =list(from_model.state_dict().keys() )
__UpperCamelCase =list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.randn((2, 3, 2_24, 2_24) )
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original one."
__UpperCamelCase =name
print(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__UpperCamelCase =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ):
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
    __UpperCamelCase ={int(k ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__UpperCamelCase ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
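# Example invocation (the script filename is an assumption):
# python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#     --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub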
| 682 | 0 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"huggingface/informer-tourism-monthly": (
"https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class UpperCAmelCase__ ( A_ ):
UpperCAmelCase__ : List[str] = "informer"
UpperCAmelCase__ : str = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
"num_hidden_layers": "encoder_layers",
}
def __init__( self , A_ = None , A_ = None , A_ = "student_t" , A_ = "nll" , A_ = 1 , A_ = None , A_ = "mean" , A_ = 0 , A_ = 0 , A_ = 0 , A_ = 0 , A_ = None , A_ = None , A_ = 64 , A_ = 32 , A_ = 32 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = 2 , A_ = True , A_ = "gelu" , A_ = 0.05 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 0.1 , A_ = 100 , A_ = 0.02 , A_=True , A_ = "prob" , A_ = 5 , A_ = True , **A_ , ) -> int:
# time series specific configuration
__UpperCamelCase =prediction_length
__UpperCamelCase =context_length or prediction_length
__UpperCamelCase =distribution_output
__UpperCamelCase =loss
__UpperCamelCase =input_size
__UpperCamelCase =num_time_features
__UpperCamelCase =lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
__UpperCamelCase =scaling
__UpperCamelCase =num_dynamic_real_features
__UpperCamelCase =num_static_real_features
__UpperCamelCase =num_static_categorical_features
# set cardinality
if cardinality and num_static_categorical_features > 0:
if len(UpperCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
__UpperCamelCase =cardinality
else:
__UpperCamelCase =[0]
# set embedding_dimension
if embedding_dimension and num_static_categorical_features > 0:
if len(UpperCAmelCase__ ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
__UpperCamelCase =embedding_dimension
else:
__UpperCamelCase =[min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
__UpperCamelCase =num_parallel_samples
# Transformer architecture configuration
__UpperCamelCase =input_size * len(self.lags_sequence ) + self._number_of_features
__UpperCamelCase =d_model
__UpperCamelCase =encoder_attention_heads
__UpperCamelCase =decoder_attention_heads
__UpperCamelCase =encoder_ffn_dim
__UpperCamelCase =decoder_ffn_dim
__UpperCamelCase =encoder_layers
__UpperCamelCase =decoder_layers
__UpperCamelCase =dropout
__UpperCamelCase =attention_dropout
__UpperCamelCase =activation_dropout
__UpperCamelCase =encoder_layerdrop
__UpperCamelCase =decoder_layerdrop
__UpperCamelCase =activation_function
__UpperCamelCase =init_std
__UpperCamelCase =use_cache
# Informer
__UpperCamelCase =attention_type
__UpperCamelCase =sampling_factor
__UpperCamelCase =distil
super().__init__(is_encoder_decoder=UpperCAmelCase__ , **UpperCAmelCase__ )
@property
def _a ( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
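# Minimal instantiation sketch (the class above is anonymized; upstream it is
# transformers.InformerConfig, and the horizon values here are illustrative):
# config = InformerConfig(prediction_length=24, context_length=48)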
| 721 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ) -> Any:
        self.checkpoint ='laion/clap-htsat-unfused'
        self.tmpdirname =tempfile.mkdtemp()
    def get_tokenizer( self , **kwargs ) -> List[Any]:
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor( self , **kwargs ) -> Dict:
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown( self ) -> int:
shutil.rmtree(self.tmpdirname )
    def _a ( self ) -> str:
        tokenizer =self.get_tokenizer()
        feature_extractor =self.get_feature_extractor()
        processor =ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor =ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
    def _a ( self ) -> int:
        processor =ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        feature_extractor_add_kwargs =self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor =ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )
    def _a ( self ) -> str:
        feature_extractor =self.get_feature_extractor()
        tokenizer =self.get_tokenizer()
        processor =ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech =floats_list((3, 1000) )
        input_feat_extract =feature_extractor(raw_speech , return_tensors='np' )
        input_processor =processor(audios=raw_speech , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def _a ( self ) -> int:
        feature_extractor =self.get_feature_extractor()
        tokenizer =self.get_tokenizer()
        processor =ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str ='This is a test string'
        encoded_processor =processor(text=input_str )
        encoded_tok =tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def _a ( self ) -> List[str]:
        feature_extractor =self.get_feature_extractor()
        tokenizer =self.get_tokenizer()
        processor =ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor =processor.batch_decode(predicted_ids )
        decoded_tok =tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
    def _a ( self ) -> Tuple:
        feature_extractor =self.get_feature_extractor()
        tokenizer =self.get_tokenizer()
        processor =ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
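    # Note on the slice above: `processor.model_input_names` concatenates the tokenizer's input
    # names (input_ids, attention_mask) with the feature extractor's, so dropping the first two
    # entries should leave exactly the feature extractor's names.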
| 682 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
rename_keys_prefix = [
('bert.bert', 'visual_bert'),
('bert.cls', 'cls'),
('bert.classifier', 'cls'),
('token_type_embeddings_visual', 'visual_token_type_embeddings'),
('position_embeddings_visual', 'visual_position_embeddings'),
('projection', 'visual_projection'),
]
ACCEPTABLE_CHECKPOINTS = [
'nlvr2_coco_pre_trained.th',
'nlvr2_fine_tuned.th',
'nlvr2_pre_trained.th',
'vcr_coco_pre_train.th',
'vcr_fine_tune.th',
'vcr_pre_train.th',
'vqa_coco_pre_trained.th',
'vqa_fine_tuned.th',
'vqa_pre_trained.th',
]
def load_state_dict( checkpoint_path ):
    sd =torch.load(checkpoint_path , map_location='cpu' )
return sd
def get_new_dict( d , config , rename_keys_prefix=rename_keys_prefix ):
    new_d =OrderedDict()
    new_d['visual_bert.embeddings.position_ids'] =torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
        new_key =key
        for name_pair in rename_keys_prefix:
            new_key =new_key.replace(name_pair[0] , name_pair[1] )
        new_d[new_key] =d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d['cls.predictions.decoder.bias'] =new_d['cls.predictions.bias']
return new_d
@torch.no_grad()
def convert_visual_bert_checkpoint( checkpoint_path , pytorch_dump_folder_path ):
    assert (
        checkpoint_path.split('/' )[-1] in ACCEPTABLE_CHECKPOINTS
    ), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
    # Get Config
    if "pre" in checkpoint_path:
        model_type ='pretraining'
        if "vcr" in checkpoint_path:
            config_params ={'visual_embedding_dim': 5_12}
        elif "vqa_advanced" in checkpoint_path:
            config_params ={'visual_embedding_dim': 20_48}
        elif "vqa" in checkpoint_path:
            config_params ={'visual_embedding_dim': 20_48}
        elif "nlvr" in checkpoint_path:
            config_params ={'visual_embedding_dim': 10_24}
        else:
            raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
    else:
        if "vcr" in checkpoint_path:
            config_params ={'visual_embedding_dim': 5_12}
            model_type ='multichoice'
        elif "vqa_advanced" in checkpoint_path:
            config_params ={'visual_embedding_dim': 20_48}
            model_type ='vqa_advanced'
        elif "vqa" in checkpoint_path:
            config_params ={'visual_embedding_dim': 20_48, 'num_labels': 31_29}
            model_type ='vqa'
        elif "nlvr" in checkpoint_path:
            config_params ={
                'visual_embedding_dim': 10_24,
                'num_labels': 2,
            }
            model_type ='nlvr'
    config =VisualBertConfig(**config_params )
    # Load State Dict
    state_dict =load_state_dict(checkpoint_path )
    new_state_dict =get_new_dict(state_dict , config )
    if model_type == "pretraining":
        model =VisualBertForPreTraining(config )
    elif model_type == "vqa":
        model =VisualBertForQuestionAnswering(config )
    elif model_type == "nlvr":
        model =VisualBertForVisualReasoning(config )
    elif model_type == "multichoice":
        model =VisualBertForMultipleChoice(config )
    model.load_state_dict(new_state_dict )
    # Save Checkpoints
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
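# Example invocation (hypothetical paths, for illustration only):
#   python convert_visual_bert_checkpoint.py vqa_fine_tuned.th ./visualbert-vqa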
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    if subparsers is not None:
        parser =subparsers.add_parser('test' )
    else:
        parser =argparse.ArgumentParser('Accelerate test command' )
    parser.add_argument(
        '--config_file' , default=None , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
        parser.set_defaults(func=test_command )
return parser
def test_command( args ):
    script_name =os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
    if args.config_file is None:
        test_args =script_name
    else:
        test_args =F'--config_file={args.config_file} {script_name}'
    cmd =['accelerate-launch'] + test_args.split()
    result =execute_subprocess_async(cmd , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def main():
    parser =test_command_parser()
    args =parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
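# Typical CLI usage (illustrative): plain `accelerate test` runs the bundled test script under
# the default config, while `accelerate test --config_file path/to/config.yaml` exercises a
# specific saved configuration.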
| 682 | 0 |
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=32 , patch_size=2 , num_channels=3 , embed_dim=16 , hidden_sizes=[32, 64, 128] , depths=[1, 2, 1] , num_heads=[2, 2, 4] , window_size=2 , mlp_ratio=2.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , patch_norm=True , initializer_range=0.02 , layer_norm_eps=1E-5 , is_training=True , scope=None , use_labels=True , type_sequence_label_size=10 , encoder_stride=8 , out_features=["stage1", "stage2"] , out_indices=[1, 2] , ) -> None:
        self.parent =parent
        self.batch_size =batch_size
        self.image_size =image_size
        self.patch_size =patch_size
        self.num_channels =num_channels
        self.embed_dim =embed_dim
        self.hidden_sizes =hidden_sizes
        self.depths =depths
        self.num_heads =num_heads
        self.window_size =window_size
        self.mlp_ratio =mlp_ratio
        self.qkv_bias =qkv_bias
        self.hidden_dropout_prob =hidden_dropout_prob
        self.attention_probs_dropout_prob =attention_probs_dropout_prob
        self.drop_path_rate =drop_path_rate
        self.hidden_act =hidden_act
        self.use_absolute_embeddings =use_absolute_embeddings
        self.patch_norm =patch_norm
        self.layer_norm_eps =layer_norm_eps
        self.initializer_range =initializer_range
        self.is_training =is_training
        self.scope =scope
        self.use_labels =use_labels
        self.type_sequence_label_size =type_sequence_label_size
        self.encoder_stride =encoder_stride
        self.out_features =out_features
        self.out_indices =out_indices
    def prepare_config_and_inputs( self ) -> List[str]:
        pixel_values =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels =None
        if self.use_labels:
            labels =ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config =self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> str:
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
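    # Shape sanity check used below: FocalNet patchifies the image into
    # (image_size // patch_size) ** 2 tokens, and each of the len(depths) - 1 downsampling
    # stages halves both spatial dims (4x fewer tokens) while doubling the channel dim.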
    def create_and_check_model( self , config , pixel_values , labels ) -> Optional[int]:
        model =FocalNetModel(config=config )
        model.to(torch_device )
        model.eval()
        result =model(pixel_values )
        expected_seq_len =((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        expected_dim =int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
    def create_and_check_backbone( self , config , pixel_values , labels ) -> Tuple:
        model =FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result =model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
        # verify backbone works with out_features=None
        config.out_features =None
        model =FocalNetBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result =model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def create_and_check_for_masked_image_modeling( self , config , pixel_values , labels ) -> Optional[Any]:
        model =FocalNetForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result =model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels =1
        model =FocalNetForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result =model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> Union[str, Any]:
        config.num_labels =self.type_sequence_label_size
        model =FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result =model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels =1
        model =FocalNetForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result =model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ) -> List[str]:
        config_and_inputs =self.prepare_config_and_inputs()
        config , pixel_values , labels =config_and_inputs
        inputs_dict ={'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ) -> List[Any]:
        self.model_tester =FocalNetModelTester(self )
        self.config_tester =ConfigTester(self , config_class=FocalNetConfig , embed_dim=37 , has_text_modality=False )
def _a ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _a ( self ) -> Tuple:
return
    def _a ( self ) -> List[str]:
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def _a ( self ) -> Optional[Any]:
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs )
    def _a ( self ) -> Optional[Any]:
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def _a ( self ) -> Tuple:
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@unittest.skip(reason='FocalNet does not use inputs_embeds' )
def _a ( self ) -> Any:
pass
@unittest.skip(reason='FocalNet does not use feedforward chunking' )
def _a ( self ) -> Tuple:
pass
    def _a ( self ) -> Any:
        config , inputs_dict =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model =model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x =model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def _a ( self ) -> Tuple:
        config , inputs_dict =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes[:-1]:
            model =model_class(config )
            signature =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names =[*signature.parameters.keys()]
            expected_arg_names =['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ) -> Union[str, Any]:
        model =model_class(config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            outputs =model(**self._prepare_for_class(inputs_dict , model_class ) )
        hidden_states =outputs.hidden_states
        expected_num_layers =getattr(
            self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(hidden_states ) , expected_num_layers )
        # FocalNet has a different seq_length
        patch_size =(
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        num_patches =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        reshaped_hidden_states =outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states ) , expected_num_layers )
        batch_size , num_channels , height , width =reshaped_hidden_states[0].shape
        reshaped_hidden_states =(
            reshaped_hidden_states[0].view(batch_size , num_channels , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def _a ( self ) -> List[Any]:
        config , inputs_dict =self.model_tester.prepare_config_and_inputs_for_common()
        image_size =(
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes[:-1]:
            inputs_dict['output_hidden_states'] =True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states =True
            self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
    def _a ( self ) -> str:
        config , inputs_dict =self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size =3
        image_size =(
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size =(
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        padded_height =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes[:-1]:
            inputs_dict['output_hidden_states'] =True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states =True
            self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@slow
    def _a ( self ) -> List[str]:
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model =FocalNetModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def _a ( self ) -> str:
        config , inputs_dict =self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init =_config_zero_init(config )
        for model_class in self.all_model_classes:
            model =model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> str:
return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None
@slow
    def _a ( self ) -> Any:
        model =FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(torch_device )
        image_processor =self.default_image_processor
        image =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        inputs =image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs =model(**inputs )
        # verify the logits
        expected_shape =torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice =torch.tensor([0.2166, -0.4368, 0.2191] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
        self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 281 )
@require_torch
class UpperCAmelCase__ ( BackboneTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig
    has_attentions = False
    def setUp( self ) -> int:
        self.model_tester =FocalNetModelTester(self )
| 701 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param( t5x_checkpoint_path ):
    flax_params =checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    flax_params =flatten_dict(flax_params )
return flax_params
def rename_and_convert_flax_params( flax_dict ):
    converted_dict ={}
    CONVERSION_MAPPING ={
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
    DECODER_CONVERSION_MAPPING ={
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
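    # Walk-through of a (hypothetical) key: ('target', 'decoder', 'layers_0', 'self_attention', 'o', 'kernel')
    # is first joined to 'decoder.layers_0.self_attention.o.kernel'; the two mappings above and the
    # regex below then rewrite it to 'decoder.layer.0.self_attention.attention.o.weight'.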
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key ='.'.join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key =new_key.replace(old , new )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key =new_key.replace(old , new )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key =re.sub(r'layers_(\d+)' , r'layer.\1' , new_key )
                new_key =new_key.replace('encoder' , 'encoder.encoder' )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key =re.sub(r'layers_(\d+)' , r'layer.\1' , new_key )
            converted_dict[new_key] =flax_dict[key]
    converted_torch_dict ={}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] =torch.from_numpy(converted_dict[key].T )
        else:
            converted_torch_dict[key] =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf( t5x_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
    flax_params =get_flax_param(t5x_checkpoint_path )
    if not use_large:
        encoder_config =Pix2StructVisionConfig()
        decoder_config =Pix2StructTextConfig()
    else:
        encoder_config =Pix2StructVisionConfig(
            hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
        decoder_config =Pix2StructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
    config =Pix2StructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model =Pix2StructForConditionalGeneration(config )
    torch_params =rename_and_convert_flax_params(flax_params )
    model.load_state_dict(torch_params )
    tokenizer =AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
    image_processor =Pix2StructImageProcessor()
    processor =Pix2StructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        image_processor.max_patches =40_96
        image_processor.is_vqa =True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print('Model saved in {}'.format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 682 | 0 |
from typing import Dict, Optional
import numpy as np
import datasets
_DESCRIPTION = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[ndarray]`):\n        List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    references (`List[ndarray]`):\n        List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n    num_labels (`int`):\n        Number of classes (categories).\n    ignore_index (`int`):\n        Index that will be ignored during evaluation.\n    nan_to_num (`int`, *optional*):\n        If specified, NaN values will be replaced by the number defined by the user.\n    label_map (`dict`, *optional*):\n        If specified, dictionary mapping old label indices to new label indices.\n    reduce_labels (`bool`, *optional*, defaults to `False`):\n        Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n        and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n    `Dict[str, float | ndarray]` comprising various elements:\n    - *mean_iou* (`float`):\n        Mean Intersection-over-Union (IoU averaged over all categories).\n    - *mean_accuracy* (`float`):\n        Mean accuracy (averaged over all categories).\n    - *overall_accuracy* (`float`):\n        Overall accuracy on all images.\n    - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n        Per category accuracy.\n    - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n        Per category IoU.\n\nExamples:\n\n    >>> import numpy as np\n\n    >>> mean_iou = datasets.load_metric("mean_iou")\n\n    >>> # suppose one has 3 different segmentation maps predicted\n    >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n    >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n    >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n    >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n    >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n    >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n    >>> predicted = [predicted_1, predicted_2, predicted_3]\n    >>> ground_truth = [actual_1, actual_2, actual_3]\n\n    >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n    >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n    {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0.   , 0.   , 0.375, 0.4  , 0.5  , 0.   , 0.5  , 1.   , 1.   , 1.   ]), \'per_category_accuracy\': array([0.        , 0.        , 0.75      , 0.66666667, 1.        , 0.        , 0.5       , 1.        , 1.        , 1.        ])}\n'
_CITATION = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def intersect_and_union( pred_label , label , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    if label_map is not None:
        for old_id, new_id in label_map.items():
            label[label == old_id] =new_id
    # turn into Numpy arrays
    pred_label =np.array(pred_label )
    label =np.array(label )
    if reduce_labels:
        label[label == 0] =2_55
        label =label - 1
        label[label == 2_54] =2_55
    mask =label != ignore_index
    mask =np.not_equal(label , ignore_index )
    pred_label =pred_label[mask]
    label =np.array(label )[mask]
    intersect =pred_label[pred_label == label]
    area_intersect =np.histogram(intersect , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_pred_label =np.histogram(pred_label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_label =np.histogram(label , bins=num_labels , range=(0, num_labels - 1) )[0]
    area_union =area_pred_label + area_label - area_intersect
    return area_intersect, area_union, area_pred_label, area_label
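# Worked example (illustrative): with pred_label=[0, 1, 1], label=[0, 1, 0], num_labels=2 and
# no pixels masked out, class 1 has intersection 1 and union 2, so its IoU is 0.5; class 0
# likewise has intersection 1 and union 2.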
def total_intersect_and_union( results , gt_seg_maps , num_labels , ignore_index , label_map = None , reduce_labels = False , ):
    total_area_intersect =np.zeros((num_labels,) , dtype=np.float64 )
    total_area_union =np.zeros((num_labels,) , dtype=np.float64 )
    total_area_pred_label =np.zeros((num_labels,) , dtype=np.float64 )
    total_area_label =np.zeros((num_labels,) , dtype=np.float64 )
    for result, gt_seg_map in zip(results , gt_seg_maps ):
        area_intersect , area_union , area_pred_label , area_label =intersect_and_union(
            result , gt_seg_map , num_labels , ignore_index , label_map , reduce_labels )
        total_area_intersect += area_intersect
        total_area_union += area_union
        total_area_pred_label += area_pred_label
        total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def mean_iou( results , gt_seg_maps , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ):
    total_area_intersect , total_area_union , total_area_pred_label , total_area_label =total_intersect_and_union(
        results , gt_seg_maps , num_labels , ignore_index , label_map , reduce_labels )
    # compute metrics
    metrics ={}
    all_acc =total_area_intersect.sum() / total_area_label.sum()
    iou =total_area_intersect / total_area_union
    acc =total_area_intersect / total_area_label
    metrics['mean_iou'] =np.nanmean(iou )
    metrics['mean_accuracy'] =np.nanmean(acc )
    metrics['overall_accuracy'] =all_acc
    metrics['per_category_iou'] =iou
    metrics['per_category_accuracy'] =acc
    if nan_to_num is not None:
        metrics ={metric: np.nan_to_num(metric_value , nan=nan_to_num ) for metric, metric_value in metrics.items()}
return metrics
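# Note: classes that appear in neither the predictions nor the ground truth produce 0/0 = NaN
# entries above; np.nanmean skips them for the aggregate scores, and `nan_to_num` lets callers
# replace them in the per-category arrays.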
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
    def _info( self ) -> Any:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
'predictions': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
'references': datasets.Sequence(datasets.Sequence(datasets.Value('uint16' ) ) ),
} ) , reference_urls=[
'https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py'
] , )
    def _compute( self , predictions , references , num_labels , ignore_index , nan_to_num = None , label_map = None , reduce_labels = False , ) -> List[str]:
        iou_result =mean_iou(
            results=predictions , gt_seg_maps=references , num_labels=num_labels , ignore_index=ignore_index , nan_to_num=nan_to_num , label_map=label_map , reduce_labels=reduce_labels , )
        return iou_result
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
        'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TrOCRForCausalLM',
        'TrOCRPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
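# With the lazy module registered, `from transformers.models.trocr import TrOCRProcessor` (or any
# attribute access on this module) triggers the actual submodule import on first use.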
| 682 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_A = {
'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/config.json',
'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/config.json',
}
class UpperCAmelCase__ ( PretrainedConfig ):
    """simple docstring"""

    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
"""n_token""": """vocab_size""", # Backward compatibility
"""hidden_size""": """d_model""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__( self , vocab_size=32000 , d_model=1024 , n_layer=24 , n_head=16 , d_inner=4096 , ff_activation="gelu" , untie_r=True , attn_type="bi" , initializer_range=0.02 , layer_norm_eps=1E-12 , dropout=0.1 , mem_len=512 , reuse_len=None , use_mems_eval=True , use_mems_train=False , bi_data=False , clamp_len=-1 , same_length=False , summary_type="last" , summary_use_proj=True , summary_activation="tanh" , summary_last_dropout=0.1 , start_n_top=5 , end_n_top=5 , pad_token_id=5 , bos_token_id=1 , eos_token_id=2 , **kwargs , ) -> None:
        self.vocab_size =vocab_size
        self.d_model =d_model
        self.n_layer =n_layer
        self.n_head =n_head
        if d_model % n_head != 0:
            raise ValueError(f'\'d_model % n_head\' ({d_model % n_head}) should be equal to 0' )
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f'`d_head` ({kwargs["d_head"]}) should be equal to `d_model // n_head` ({d_model // n_head})' )
        self.d_head =d_model // n_head
        self.ff_activation =ff_activation
        self.d_inner =d_inner
        self.untie_r =untie_r
        self.attn_type =attn_type
        self.initializer_range =initializer_range
        self.layer_norm_eps =layer_norm_eps
        self.dropout =dropout
        self.mem_len =mem_len
        self.reuse_len =reuse_len
        self.bi_data =bi_data
        self.clamp_len =clamp_len
        self.same_length =same_length
        self.summary_type =summary_type
        self.summary_use_proj =summary_use_proj
        self.summary_activation =summary_activation
        self.summary_last_dropout =summary_last_dropout
        self.start_n_top =start_n_top
        self.end_n_top =end_n_top
        self.bos_token_id =bos_token_id
        self.pad_token_id =pad_token_id
        self.eos_token_id =eos_token_id
        if "use_cache" in kwargs:
            warnings.warn(
                'The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`'
                ' instead.' , FutureWarning , )
            use_mems_eval =kwargs['use_cache']
        self.use_mems_eval =use_mems_eval
        self.use_mems_train =use_mems_train
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self ) -> int:
logger.info(f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self , value ) -> None:
raise NotImplementedError(
f'The model {self.model_type} is one of the few models that has no sequence length limit.' )
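# A minimal usage sketch (illustrative): thanks to `attribute_map`, the generic config names
# resolve to the XLNet-specific ones, e.g.
#   config = UpperCAmelCase__(d_model=512, n_layer=6)
#   assert config.hidden_size == 512 and config.num_hidden_layers == 6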
| 703 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester :
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ) -> None:
        self.parent =parent
        self.batch_size =13
        self.seq_length =7
        self.is_training =True
        self.use_input_mask =True
        self.use_token_type_ids =True
        self.use_labels =True
        self.vocab_size =99
        self.hidden_size =32
        self.num_hidden_layers =2
        self.num_attention_heads =4
        self.intermediate_size =37
        self.hidden_act ='gelu'
        self.hidden_dropout_prob =0.1
        self.attention_probs_dropout_prob =0.1
        self.max_position_embeddings =512
        self.type_vocab_size =16
        self.type_sequence_label_size =2
        self.initializer_range =0.02
        self.num_labels =3
        self.num_choices =4
        self.scope =None
    def prepare_config_and_inputs( self ) -> Tuple:
        input_ids =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask =None
        if self.use_input_mask:
            input_mask =random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids =None
        if self.use_token_type_ids:
            token_type_ids =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels =None
        token_labels =None
        choice_labels =None
        if self.use_labels:
            sequence_labels =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels =ids_tensor([self.batch_size] , self.num_choices )
        config =RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        model =TFRoFormerModel(config=config )
        inputs ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs =[input_ids, input_mask]
        result =model(inputs )
        result =model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_lm_head( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> int:
        config.is_decoder =True
        model =TFRoFormerForCausalLM(config=config )
        inputs ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        prediction_scores =model(inputs )['logits']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        model =TFRoFormerForMaskedLM(config=config )
        inputs ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result =model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Dict:
        config.num_labels =self.num_labels
        model =TFRoFormerForSequenceClassification(config=config )
        inputs ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result =model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> str:
        config.num_choices =self.num_choices
        model =TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids =tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask =tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids =tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs ={
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result =model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Optional[Any]:
        config.num_labels =self.num_labels
        model =TFRoFormerForTokenClassification(config=config )
        inputs ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result =model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ) -> Union[str, Any]:
        model =TFRoFormerForQuestionAnswering(config=config )
        inputs ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result =model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ) -> Dict:
        config_and_inputs =self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) =config_and_inputs
        inputs_dict ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> Optional[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
    def setUp( self ) -> str:
        self.model_tester =TFRoFormerModelTester(self )
        self.config_tester =ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
    def _a ( self ) -> Any:
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def _a ( self ) -> Dict:
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def _a ( self ) -> List[str]:
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
    def _a ( self ) -> str:
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def _a ( self ) -> int:
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def _a ( self ) -> List[Any]:
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def _a ( self ) -> Optional[Any]:
        config_and_inputs =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def _a ( self ) -> Union[str, Any]:
        model =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
        self.assertIsNotNone(model )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[str]:
__UpperCamelCase =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
__UpperCamelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =model(A_ )[0]
# TODO Replace vocab size
__UpperCamelCase =50000
__UpperCamelCase =[1, 6, vocab_size]
self.assertEqual(output.shape , A_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__UpperCamelCase =tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    tolerance = 1e-4
    def _a ( self ) -> int:
        input_ids =tf.constant([[4, 10]] )
        emba =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb =emba(input_ids.shape )
        desired_weights =tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb , desired_weights , atol=self.tolerance )
    def _a ( self ) -> int:
        desired_weights =tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emba =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        weights =emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
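    # The expected rows above follow the standard sinusoidal scheme: entry (pos, i) in the first
    # half of each row is sin(pos / 10000^(2i/dim)) and the second half holds the matching cos
    # terms, which is what the slices checked here should reproduce.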
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    tolerance = 1e-4
    def _a ( self ) -> List[Any]:
        # 2,12,16,64
        query_layer =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos =embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer , key_layer =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query =tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ] )
        expected_key =tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key , atol=self.tolerance )
| 682 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class UpperCAmelCase__ ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    def get_dummy_components( self ) -> Optional[int]:
return self._get_dummy_components()
    def get_dummy_inputs( self , device , seed=0 ) -> Tuple:
        if str(device ).startswith('mps' ):
            generator =torch.manual_seed(seed )
        else:
            generator =torch.Generator(device=device ).manual_seed(seed )
        image =floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        mask_image =floats_tensor((1, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        inputs ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def _a ( self ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def _a ( self ) -> List[str]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def _a ( self ) -> str:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def _a ( self ) -> str:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def _a ( self ) -> Dict:
self._test_save_load_local()
def _a ( self ) -> Optional[Any]:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 704 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector :
"""simple docstring"""
    def __init__( self , components = None ) -> None:
        if components is None:
            components =[]
        self.__components =list(components )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
    def __add__( self , other ) -> Vector:
        size =len(self )
        if size == len(other ):
            result =[self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
else:
raise Exception('must have the same size' )
    def __sub__( self , other ) -> Vector:
        size =len(self )
        if size == len(other ):
            result =[self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
else: # error case
raise Exception('must have the same size' )
    @overload
    def __mul__( self , other ) -> Vector:
        ...
    @overload
    def __mul__( self , other ) -> float:
        ...
    def __mul__( self , other ) -> float | Vector:
        if isinstance(other , (float, int) ):
            ans =[c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size =len(self )
            prods =[self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else:  # error case
            raise Exception('invalid operand!' )
    def copy( self ) -> Vector:
return Vector(self.__components )
    def component( self , i ) -> float:
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception('index out of range' )
    def change_component( self , pos , value ) -> None:
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] =value
    def euclidean_length( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
        squares =[c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self , other , deg = False ) -> float:
        num =self * other
        den =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def zero_vector( dimension ):
    assert isinstance(dimension , int )
    return Vector([0] * dimension )
def unit_basis_vector( dimension , pos ):
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans =[0] * dimension
    ans[pos] =1
    return Vector(ans )
def axpy( scalar , x , y ):
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
    return Vector(ans )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ ) -> None:
__UpperCamelCase =matrix
__UpperCamelCase =w
__UpperCamelCase =h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
else:
raise Exception('matrix must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
        if isinstance(A_ , Vector ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
                    self.__matrix[i][j] * other.component(j )
                    for j in range(self.__width )
                ]
                ans.change_component(i , sum(prods ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
            return Matrix(matrix , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] =minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
    __UpperCamelCase =[[0] * n for _ in range(n )]
    return Matrix(matrix , n , n )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
    return Matrix(matrix , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
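# Brief usage sketch (hedged: Vector and Matrix are the classes' annotated
# names above; axpy is assumed as the name of the scalar-multiply-add helper).
if __name__ == "__main__":
    v = Vector([1, 2, 3])
    w = Vector([4, 5, 6])
    print(v + w)               # (5,7,9)
    print(v * w)               # dot product: 32
    print(axpy(2.0, v, w))     # 2*v + w -> (6.0,9.0,12.0)
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())     # 1*4 - 2*3 = -2
    print(m * Vector([1, 1]))  # matrix-vector product -> (3,7)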
| 682 | 0 |
'''simple docstring'''
import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=64 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_input_mask
__UpperCamelCase =use_token_type_ids
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =embedding_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =num_labels
__UpperCamelCase =num_choices
__UpperCamelCase =scope
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self ) -> Dict:
return MegatronBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Any:
        __UpperCamelCase =MegatronBertModel(config=config )
        model.to(torch_device )
        model.eval()
        __UpperCamelCase =model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        __UpperCamelCase =model(input_ids , token_type_ids=token_type_ids )
        __UpperCamelCase =model(input_ids )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
        __UpperCamelCase =MegatronBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        __UpperCamelCase =model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
        __UpperCamelCase =MegatronBertForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        __UpperCamelCase =model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
        __UpperCamelCase =MegatronBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        __UpperCamelCase =model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
        __UpperCamelCase =MegatronBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        __UpperCamelCase =model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
        __UpperCamelCase =MegatronBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        __UpperCamelCase =model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
__UpperCamelCase =self.num_labels
        __UpperCamelCase =MegatronBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        __UpperCamelCase =model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.num_labels
        __UpperCamelCase =MegatronBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        __UpperCamelCase =model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.num_choices
        __UpperCamelCase =MegatronBertForMultipleChoice(config=config )
        model.to(torch_device )
model.eval()
__UpperCamelCase =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCamelCase =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __UpperCamelCase =model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.prepare_config_and_inputs()
        (
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
        ) =config_and_inputs
        __UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
{
"feature-extraction": MegatronBertModel,
"fill-mask": MegatronBertForMaskedLM,
"question-answering": MegatronBertForQuestionAnswering,
"text-classification": MegatronBertForSequenceClassification,
"text-generation": MegatronBertForCausalLM,
"token-classification": MegatronBertForTokenClassification,
"zero-shot": MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = True
# test_resize_embeddings = False
UpperCAmelCase__ : Any = False
def _a ( self , A_ , A_ , A_=False ) -> Tuple:
        __UpperCamelCase =super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                __UpperCamelCase =torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                __UpperCamelCase =torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def _a ( self ) -> str:
__UpperCamelCase =MegatronBertModelTester(self )
        __UpperCamelCase =ConfigTester(self , config_class=MegatronBertConfig , hidden_size=37 )
def _a ( self ) -> str:
self.config_tester.run_common_tests()
def _a ( self ) -> Tuple:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_model(*config_and_inputs )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_masked_lm(*config_and_inputs )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*config_and_inputs )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*config_and_inputs )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_pretraining(*config_and_inputs )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_question_answering(*config_and_inputs )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*config_and_inputs )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_megatron_bert_for_token_classification(*config_and_inputs )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
    return torch.tensor(
        SCREAMING_SNAKE_CASE__ , dtype=torch.long , device=torch_device , )
_A = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip('Model is not available.' )
def _a ( self ) -> int:
        __UpperCamelCase ='nvidia/megatron-bert-uncased-345m'
        if "MYDIR" in os.environ:
            __UpperCamelCase =os.path.join(os.environ['MYDIR'] , directory )
        __UpperCamelCase =MegatronBertModel.from_pretrained(directory )
        model.to(torch_device )
        model.half()
        __UpperCamelCase =_long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] )
        with torch.no_grad():
            __UpperCamelCase =model(input_ids )[0]
        __UpperCamelCase =torch.Size((1, 9, 1024) )
        self.assertEqual(output.shape , expected_shape )
        __UpperCamelCase =[-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
        for ii in range(3 ):
            for jj in range(3 ):
                __UpperCamelCase =output[0, ii, jj]
                __UpperCamelCase =expected[3 * ii + jj]
                __UpperCamelCase ='ii={} jj={} a={} b={}'.format(ii , jj , a , b )
                self.assertTrue(math.isclose(a , b , rel_tol=TOLERANCE , abs_tol=TOLERANCE ) , msg=msg )
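# Standard direct-run entry point (hedged sketch): the suites above are
# normally collected by unittest/pytest discovery rather than run this way.
if __name__ == "__main__":
    unittest.main()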
| 705 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
_A = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
_A = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
_A = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self , A_ , A_ , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="m2m100" , A_ = None , A_=8 , **A_ , ) -> None:
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase =language_codes
__UpperCamelCase =FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCamelCase ={lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__UpperCamelCase =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code )
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A_ , tgt_lang=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , language_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A_ , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =load_json(A_ )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =spm_file
__UpperCamelCase =load_spm(A_ , self.sp_model_kwargs )
__UpperCamelCase =len(self.encoder )
__UpperCamelCase ={
            self.get_lang_token(lang_code ): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )
}
        __UpperCamelCase ={lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code )}
__UpperCamelCase ={v: k for k, v in self.lang_token_to_id.items()}
__UpperCamelCase =src_lang if src_lang is not None else 'en'
__UpperCamelCase =tgt_lang
__UpperCamelCase =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCamelCase =num_madeup_words
@property
def _a ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , A_ ) -> None:
__UpperCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , A_ ) -> List[str]:
        return self.sp_model.encode(A_ , out_type=str )
def _a ( self , A_ ) -> Optional[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def _a ( self , A_ ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A_ , self.unk_token )
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =[]
__UpperCamelCase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                __UpperCamelCase =[]
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
                token_ids_0=A_ , token_ids_1=A_ , already_has_special_tokens=A_ )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1] * len(self.suffix_tokens )
        if token_ids_1 is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _a ( self , A_ , A_ = None ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
def _a ( self ) -> Dict:
        __UpperCamelCase ={self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> None:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
__UpperCamelCase =Path(A_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
        save_json(self.encoder , vocab_save_path )
        if os.path.abspath(self.spm_file ) != os.path.abspath(spm_save_path ) and os.path.isfile(self.spm_file ):
            copyfile(self.spm_file , spm_save_path )
        elif not os.path.isfile(self.spm_file ):
            with open(spm_save_path , 'wb' ) as fi:
                __UpperCamelCase =self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (str(vocab_save_path ), str(spm_save_path ))
def _a ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
        return super().prepare_seq2seq_batch(A_ , A_ , **A_ )
def _a ( self , A_ , A_ , A_ , **A_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
        __UpperCamelCase =self(A_ , add_special_tokens=True , **A_ )
__UpperCamelCase =self.get_lang_id(A_ )
        inputs['forced_bos_token_id'] =tgt_lang_id
return inputs
def _a ( self ) -> List[Any]:
self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> str:
return self.lang_code_to_token[lang]
def _a ( self , A_ ) -> int:
__UpperCamelCase =self.get_lang_token(A_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict[str, Any] ):
__UpperCamelCase =sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE__ )
spm.Load(str(SCREAMING_SNAKE_CASE__ ) )
return spm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
        return json.load(f )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
        json.dump(SCREAMING_SNAKE_CASE__ , f , indent=2 )
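# Hedged usage sketch through the public transformers API (checkpoint names
# come from PRETRAINED_VOCAB_FILES_MAP above; M2M100Tokenizer is the exported
# name of the tokenizer implemented in this module).
if __name__ == "__main__":
    from transformers import M2M100Tokenizer

    tok = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang='en', tgt_lang='fr')
    enc = tok('Hello world', return_tensors='pt')
    # The source text is wrapped as __en__ ... </s> by set_src_lang_special_tokens.
    print(tok.convert_ids_to_tokens(enc['input_ids'][0]))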
| 682 | 0 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ):
    assert isinstance(SCREAMING_SNAKE_CASE__ , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCamelCase =ParquetDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__UpperCamelCase =features.copy() if features else default_expected_features
__UpperCamelCase =(
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase =ParquetDatasetReader(SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__UpperCamelCase =ParquetDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ , split=SCREAMING_SNAKE_CASE__ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int ):
    if issubclass(SCREAMING_SNAKE_CASE__ , str ):
        __UpperCamelCase =parquet_path
    elif issubclass(SCREAMING_SNAKE_CASE__ , list ):
__UpperCamelCase =[parquet_path]
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__UpperCamelCase =ParquetDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_parquet_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict=("train",) ):
    assert isinstance(SCREAMING_SNAKE_CASE__ , DatasetDict )
for split in splits:
__UpperCamelCase =dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__UpperCamelCase =ParquetDatasetReader(
{'train': parquet_path} , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__UpperCamelCase =features.copy() if features else default_expected_features
__UpperCamelCase =(
        Features({feature: Value(dtype ) for feature, dtype in features.items()} ) if features is not None else None
)
__UpperCamelCase =ParquetDatasetReader({'train': parquet_path} , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ):
if split:
__UpperCamelCase ={split: parquet_path}
else:
__UpperCamelCase ='train'
__UpperCamelCase ={'train': parquet_path, 'test': parquet_path}
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase ={'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__UpperCamelCase =ParquetDatasetReader(SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read()
_check_parquet_datasetdict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =ParquetDatasetWriter(SCREAMING_SNAKE_CASE__ , tmp_path / 'foo.parquet' )
assert writer.write() > 0
__UpperCamelCase =pq.ParquetFile(tmp_path / 'foo.parquet' )
__UpperCamelCase =pf.read()
assert dataset.data.table == output_table
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =str(shared_datadir / 'test_image_rgb.jpg' )
__UpperCamelCase ={'image': [image_path]}
__UpperCamelCase =Features({'image': Image()} )
    __UpperCamelCase =Dataset.from_dict(data , features=features )
    __UpperCamelCase =ParquetDatasetWriter(dataset , tmp_path / 'foo.parquet' )
assert writer.write() > 0
__UpperCamelCase =Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
    __UpperCamelCase =ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=True ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ):
assert get_writer_batch_size(SCREAMING_SNAKE_CASE__ ) == expected
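# Hedged round-trip sketch mirroring the tests above (public `datasets` API;
# 'demo.parquet' is a placeholder output path in the working directory).
if __name__ == "__main__":
    demo = Dataset.from_dict({'col_1': ['a', 'b'], 'col_2': [1, 2], 'col_3': [1.0, 2.0]})
    ParquetDatasetWriter(demo, 'demo.parquet').write()      # returns bytes written (> 0)
    reloaded = ParquetDatasetReader('demo.parquet').read()  # back to a Dataset
    assert reloaded.column_names == ['col_1', 'col_2', 'col_3']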
| 706 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =original_name.split('.' )[0]
__UpperCamelCase =key.split('.' )
    __UpperCamelCase =int(key_list[key_list.index(to_find ) - 2] )
    __UpperCamelCase =int(key_list[key_list.index(to_find ) - 1] )
__UpperCamelCase =orig_block_num - offset
__UpperCamelCase =key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =OrderedDict()
__UpperCamelCase , __UpperCamelCase =0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
__UpperCamelCase =key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
__UpperCamelCase =key[: key.find('proj' )]
                __UpperCamelCase =key.replace(to_replace , F'patch_embeddings.{total_embed_found}.' )
__UpperCamelCase =key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
__UpperCamelCase ='poolformer.encoder.' + key
if "mlp.fc1" in key:
            __UpperCamelCase =replace_key_with_offset(key , patch_emb_offset , 'mlp.fc1' , 'output.conv1' )
        if "mlp.fc2" in key:
            __UpperCamelCase =replace_key_with_offset(key , patch_emb_offset , 'mlp.fc2' , 'output.conv2' )
        if "norm1" in key:
            __UpperCamelCase =replace_key_with_offset(key , patch_emb_offset , 'norm1' , 'before_norm' )
        if "norm2" in key:
            __UpperCamelCase =replace_key_with_offset(key , patch_emb_offset , 'norm2' , 'after_norm' )
        if "layer_scale_1" in key:
            __UpperCamelCase =replace_key_with_offset(key , patch_emb_offset , 'layer_scale_1' , 'layer_scale_1' )
        if "layer_scale_2" in key:
            __UpperCamelCase =replace_key_with_offset(key , patch_emb_offset , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
__UpperCamelCase =key.replace('head' , 'classifier' )
__UpperCamelCase =value
return new_state_dict
def _UpperCAmelCase ( ):
__UpperCamelCase ='http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase =Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return image
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =PoolFormerConfig()
# set attributes based on model_name
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =model_name[-3:]
__UpperCamelCase =10_00
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =(1, 10_00)
# set config attributes
    __UpperCamelCase =json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    __UpperCamelCase ={int(k ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
if size == "s12":
__UpperCamelCase =[2, 2, 6, 2]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s24":
__UpperCamelCase =[4, 4, 12, 4]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.9
elif size == "m36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
elif size == "m48":
__UpperCamelCase =[8, 8, 24, 8]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
    __UpperCamelCase =PoolFormerImageProcessor(crop_pct=crop_pct )
    # Prepare image
    __UpperCamelCase =prepare_img()
    __UpperCamelCase =image_processor(images=image , return_tensors='pt' ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device('cpu' ) )
# rename keys
    __UpperCamelCase =rename_keys(state_dict )
    # create HuggingFace model and load state dict
    __UpperCamelCase =PoolFormerForImageClassification(config )
    model.load_state_dict(state_dict )
model.eval()
# Define image processor
    __UpperCamelCase =PoolFormerImageProcessor(crop_pct=crop_pct )
    __UpperCamelCase =image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
    # forward pass
    __UpperCamelCase =model(pixel_values )
__UpperCamelCase =outputs.logits
# define expected logit slices for different models
if size == "s12":
__UpperCamelCase =torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
__UpperCamelCase =torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
__UpperCamelCase =torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
__UpperCamelCase =torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
__UpperCamelCase =torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
    Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=True )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_A = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
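    # Example invocation (hedged; the checkpoint path is a placeholder):
    #   python convert_poolformer_checkpoint.py --model_name poolformer_s12 \
    #       --checkpoint_path /path/to/poolformer_s12.pth.tar \
    #       --pytorch_dump_folder_path ./poolformer_s12_converted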
| 682 | 0 |
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ ) -> Union[str, Any]:
__UpperCamelCase =val
__UpperCamelCase =None
__UpperCamelCase =None
def _a ( self , A_ ) -> Optional[int]:
if self.val:
if val < self.val:
if self.left is None:
                    __UpperCamelCase =Node(val )
                else:
                    self.left.insert(val )
            elif val > self.val:
                if self.right is None:
                    __UpperCamelCase =Node(val )
                else:
                    self.right.insert(val )
else:
__UpperCamelCase =val
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if root:
        inorder(root.left , res )
        res.append(root.val )
        inorder(root.right , res )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
    if len(arr ) == 0:
        return arr
    __UpperCamelCase =Node(arr[0] )
    for i in range(1 , len(arr ) ):
        root.insert(arr[i] )
    # Traverse BST in order.
    __UpperCamelCase =[]
    inorder(root , res )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
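    # Note (hedged): equal keys overwrite the existing node (final else branch
    # of insert above), so duplicates are collapsed rather than preserved:
    print(tree_sort([3, 3, 1]))  # -> [1, 3], not [1, 3, 3]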
| 707 |
from math import asin, atan, cos, radians, sin, sqrt, tan
_A = 6_378_137.0
_A = 6_356_752.314_245
_A = 637_8137
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
__UpperCamelCase =(AXIS_A - AXIS_B) / AXIS_A
    __UpperCamelCase =atan((1 - flattening) * tan(radians(lat1 ) ) )
    __UpperCamelCase =atan((1 - flattening) * tan(radians(lat2 ) ) )
    __UpperCamelCase =radians(lon1 )
    __UpperCamelCase =radians(lon2 )
    # Equation
    __UpperCamelCase =sin((phi_2 - phi_1) / 2 )
    __UpperCamelCase =sin((lambda_2 - lambda_1) / 2 )
    # Square both values
    sin_sq_phi *= sin_sq_phi
    sin_sq_lambda *= sin_sq_lambda
    __UpperCamelCase =sqrt(sin_sq_phi + (cos(phi_2 ) * cos(phi_1 ) * sin_sq_lambda) )
    return 2 * RADIUS * asin(h_value )
if __name__ == "__main__":
import doctest
doctest.testmod()
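    # Worked example (hedged: `haversine_distance` is assumed to be the
    # original name of the function above): San Francisco -> Yosemite Valley.
    print(f"{haversine_distance(37.774856, -122.424227, 37.864742, -119.537521):,.0f} meters")  # ~254,352 meters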
| 682 | 0 |
import argparse
import os
import re
import packaging.version
_A = 'examples/'
_A = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
_A = {
'init': 'src/transformers/__init__.py',
'setup': 'setup.py',
}
_A = 'README.md'
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
    with open(fname , 'r' , encoding='utf-8' , newline='\n' ) as f:
        __UpperCamelCase =f.read()
    __UpperCamelCase , __UpperCamelCase =REPLACE_PATTERNS[pattern]
    __UpperCamelCase =replace.replace('VERSION' , version )
    __UpperCamelCase =re_pattern.sub(replace , code )
    with open(fname , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.write(code )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ):
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
                update_version_in_file(os.path.join(folder , fname ) , version , pattern='examples' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int]=False ):
for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname , version , pattern )
    if not patch:
        update_version_in_examples(version )
def _UpperCAmelCase ( ):
__UpperCamelCase ='🤗 Transformers currently provides the following architectures'
__UpperCamelCase ='1. Want to contribute a new model?'
    with open(README_FILE , 'r' , encoding='utf-8' , newline='\n' ) as f:
__UpperCamelCase =f.readlines()
# Find the start of the list.
__UpperCamelCase =0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__UpperCamelCase =start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
__UpperCamelCase =lines[index].replace(
'https://huggingface.co/docs/transformers/main/model_doc' , 'https://huggingface.co/docs/transformers/model_doc' , )
index += 1
    with open(README_FILE , 'w' , encoding='utf-8' , newline='\n' ) as f:
        f.writelines(lines )
def _UpperCAmelCase ( ):
with open(REPLACE_FILES['init'] , 'r' ) as f:
__UpperCamelCase =f.read()
    __UpperCamelCase =REPLACE_PATTERNS['init'][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str]=False ):
__UpperCamelCase =get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
__UpperCamelCase =default_version.base_version
elif patch:
__UpperCamelCase =F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
__UpperCamelCase =F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
__UpperCamelCase =input(F'Which version are you releasing? [{default_version}]' )
    if len(version ) == 0:
__UpperCamelCase =default_version
print(F'Updating version to {version}.' )
    global_version_update(version , patch=patch )
if not patch:
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
def _UpperCAmelCase ( ):
__UpperCamelCase =get_version()
__UpperCamelCase =F'{current_version.major}.{current_version.minor + 1}.0.dev0'
__UpperCamelCase =current_version.base_version
# Check with the user we got that right.
__UpperCamelCase =input(F'Which version are we developing now? [{dev_version}]' )
    if len(version ) == 0:
__UpperCamelCase =dev_version
print(F'Updating version to {version}.' )
    global_version_update(version )
print('Cleaning main README, don\'t forget to run `make fix-copies`.' )
clean_main_ref_in_model_list()
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
_A = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 708 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
return 1 if input_a == input_a else 0
def _UpperCAmelCase ( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 682 | 0 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@require_torch
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =pipeline(
task='zero-shot-audio-classification' , model='hf-internal-testing/tiny-clap-htsat-unfused' )
__UpperCamelCase =load_dataset('ashraq/esc50' )
__UpperCamelCase =dataset['train']['audio'][-1]['array']
        __UpperCamelCase =audio_classifier(audio , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [{'score': 0.501, 'label': 'Sound of a dog'}, {'score': 0.499, 'label': 'Sound of vaccum cleaner'}] , )
@unittest.skip('No models are available in TF' )
def _a ( self ) -> int:
pass
@slow
@require_torch
def _a ( self ) -> Tuple:
__UpperCamelCase =pipeline(
task='zero-shot-audio-classification' , model='laion/clap-htsat-unfused' , )
# This is an audio of a dog
__UpperCamelCase =load_dataset('ashraq/esc50' )
__UpperCamelCase =dataset['train']['audio'][-1]['array']
        __UpperCamelCase =audio_classifier(audio , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
        self.assertEqual(
            nested_simplify(output ) , [
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
] , )
__UpperCamelCase =audio_classifier([audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] )
self.assertEqual(
            nested_simplify(output ) , [
[
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
__UpperCamelCase =audio_classifier(
[audio] * 5 , candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'] , batch_size=5 )
self.assertEqual(
            nested_simplify(output ) , [
[
{'score': 0.999, 'label': 'Sound of a dog'},
{'score': 0.001, 'label': 'Sound of vaccum cleaner'},
],
]
* 5 , )
@unittest.skip('No models are available in TF' )
def _a ( self ) -> int:
pass
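# Hedged standalone sketch of the pipeline exercised above (public
# transformers API; 'dog_bark.wav' is a placeholder audio file path).
if __name__ == "__main__":
    classifier = pipeline(task='zero-shot-audio-classification', model='laion/clap-htsat-unfused')
    print(classifier('dog_bark.wav', candidate_labels=['Sound of a dog', 'Sound of rain']))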
| 709 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 ):
    __UpperCamelCase =right or len(list_data ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
        return search(list_data , key , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
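    # Quick demo (hedged: `search` is the name used in the recursive call
    # above); the scan moves inward from both ends of the list.
    print(search([1, 3, 5, 7, 9], 7))  # -> 3
    print(search([1, 3, 5, 7, 9], 4))  # -> -1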
| 682 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
    __UpperCamelCase =list(string1 )
    __UpperCamelCase =list(string2 )
    __UpperCamelCase =0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count += 1
            list1[i] ='_'
    if count > 1:
        return False
    else:
        return "".join(list1 )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[]
while True:
        __UpperCamelCase =['$'] * len(binary )
        __UpperCamelCase =[]
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                __UpperCamelCase =compare_string(binary[i] , binary[j] )
                if k is False:
                    check1[i] ='*'
                    check1[j] ='*'
                    temp.append('X' )
        for i in range(len(binary ) ):
            if check1[i] == '$':
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        __UpperCamelCase =list(set(temp ) )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase =[]
for minterm in minterms:
        __UpperCamelCase =''
        for _ in range(no_of_variable ):
            __UpperCamelCase =str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
return temp
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
    __UpperCamelCase =list(string1 )
    __UpperCamelCase =list(string2 )
    __UpperCamelCase =0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
count_n += 1
return count_n == count
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
__UpperCamelCase =[]
    __UpperCamelCase =[0] * len(chart )
    for i in range(len(chart[0] ) ):
        __UpperCamelCase =0
        __UpperCamelCase =-1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                __UpperCamelCase =j
        if count == 1:
            select[rem] =1
    for i in range(len(chart ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] =0
temp.append(prime_implicants[i] )
while True:
__UpperCamelCase =0
__UpperCamelCase =-1
__UpperCamelCase =0
        for i in range(len(chart ) ):
__UpperCamelCase =chart[i].count(1 )
if count_n > max_n:
__UpperCamelCase =count_n
__UpperCamelCase =i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] =0
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : List[Any] ):
    __UpperCamelCase =[[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        __UpperCamelCase =prime_implicants[i].count('_' )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] =1
return chart
def _UpperCAmelCase ( ):
__UpperCamelCase =int(input('Enter the no. of variables\n' ) )
__UpperCamelCase =[
        float(x )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
    __UpperCamelCase =decimal_to_binary(no_of_variable , minterms )
    __UpperCamelCase =check(binary )
    print('Prime Implicants are:' )
    print(prime_implicants )
    __UpperCamelCase =prime_implicant_chart(prime_implicants , binary )
    __UpperCamelCase =selection(chart , prime_implicants )
    print('Essential Prime Implicants are:' )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
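    # Sanity checks for compare_string (hedged sketch): terms differing in
    # exactly one bit merge, with '_' marking the eliminated variable.
    assert compare_string('0110', '0111') == '011_'
    assert compare_string('0110', '1001') is False  # differ in more than one bit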
main()
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
        __UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 682 | 0 |
UNIVERSAL_GAS_CONSTANT = 8.314_462  # Unit - J mol-1 K-1
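# Ideal gas law: P * V = n * R * T, so P = nRT / V and V = nRT / P.
# Inputs are SI units: moles (mol), kelvin (K), volume (m^3), pressure (Pa).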
def pressure_of_gas_system( moles: float , kelvin: float , volume: float ):
    if moles < 0 or kelvin < 0 or volume < 0:
        raise ValueError('Invalid inputs. Enter positive value.' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def volume_of_gas_system( moles: float , kelvin: float , pressure: float ):
    if moles < 0 or kelvin < 0 or pressure < 0:
        raise ValueError('Invalid inputs. Enter positive value.' )
    return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
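# Worked example (illustrative values): for 1 mol at 273.15 K and 101325 Pa,
# volume = 1 * 273.15 * 8.314462 / 101325 ≈ 0.0224 m^3, i.e. the familiar
# ~22.4 L molar volume at STP.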
if __name__ == "__main__":
from doctest import testmod
testmod()
| 711 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_A = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Whether to use SortishSampler or not."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=A_ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _a ( self ) -> Dict:
__UpperCamelCase =super().to_dict()
for k, v in d.items():
if isinstance(A_ , A_ ):
__UpperCamelCase =v.to_dict()
return d
| 682 | 0 |
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
_A = logging.get_logger(__name__)
# General docstring
_A = 'MobileNetV1Config'
# Base docstring
_A = 'google/mobilenet_v1_1.0_224'
_A = [1, 1024, 7, 7]
# Image classification docstring
_A = 'google/mobilenet_v1_1.0_224'
_A = 'tabby, tabby cat'
_A = [
'google/mobilenet_v1_1.0_224',
'google/mobilenet_v1_0.75_192',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
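# Builds a dict mapping TensorFlow checkpoint variable prefixes to the PyTorch
# parameters of the MobileNetV1 backbone (13 depthwise/pointwise pairs) and,
# for classification models, the final 1x1 conv classifier.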
def _build_tf_to_pytorch_map( model , config , tf_weights=None ):
__UpperCamelCase ={}
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__UpperCamelCase =model.mobilenet_va
else:
__UpperCamelCase =model
__UpperCamelCase ='MobilenetV1/Conv2d_0/'
__UpperCamelCase =backbone.conv_stem.convolution.weight
__UpperCamelCase =backbone.conv_stem.normalization.bias
__UpperCamelCase =backbone.conv_stem.normalization.weight
__UpperCamelCase =backbone.conv_stem.normalization.running_mean
__UpperCamelCase =backbone.conv_stem.normalization.running_var
for i in range(13 ):
__UpperCamelCase =i + 1
__UpperCamelCase =i * 2
__UpperCamelCase =backbone.layer[pt_index]
__UpperCamelCase =F'MobilenetV1/Conv2d_{tf_index}_depthwise/'
__UpperCamelCase =pointer.convolution.weight
__UpperCamelCase =pointer.normalization.bias
__UpperCamelCase =pointer.normalization.weight
__UpperCamelCase =pointer.normalization.running_mean
__UpperCamelCase =pointer.normalization.running_var
__UpperCamelCase =backbone.layer[pt_index + 1]
__UpperCamelCase =F'MobilenetV1/Conv2d_{tf_index}_pointwise/'
__UpperCamelCase =pointer.convolution.weight
__UpperCamelCase =pointer.normalization.bias
__UpperCamelCase =pointer.normalization.weight
__UpperCamelCase =pointer.normalization.running_mean
__UpperCamelCase =pointer.normalization.running_var
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
__UpperCamelCase ='MobilenetV1/Logits/Conv2d_1c_1x1/'
__UpperCamelCase =model.classifier.weight
__UpperCamelCase =model.classifier.bias
return tf_to_pt_map
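# Reads every variable from the TF checkpoint, transposes conv kernels into
# PyTorch layout, copies them onto the mapped parameters, and drops optimizer
# state (RMSProp/EMA slots) that has no PyTorch counterpart.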
def load_tf_weights_in_mobilenet_va( model , config , tf_checkpoint_path ):
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
            'https://www.tensorflow.org/install/ for installation instructions.' )
        raise
    # Load weights from TF model
    init_vars =tf.train.list_variables(tf_checkpoint_path )
    tf_weights ={}
    for name, shape in init_vars:
        logger.info(F'Loading TF weight {name} with shape {shape}' )
        array =tf.train.load_variable(tf_checkpoint_path , name )
        tf_weights[name] =array
    # Build TF to PyTorch weights loading map
    tf_to_pt_map =_build_tf_to_pytorch_map(model , config , tf_weights )
    for name, pointer in tf_to_pt_map.items():
        logger.info(F'Importing {name}' )
        if name not in tf_weights:
            logger.info(F'{name} not in tf pre-trained weights, skipping' )
            continue
        array =tf_weights[name]
        if "depthwise_weights" in name:
            logger.info('Transposing depthwise' )
            array =np.transpose(array , (2, 3, 0, 1) )
        elif "weights" in name:
            logger.info('Transposing' )
            if len(pointer.shape ) == 2: # copying into linear layer
                array =array.squeeze().transpose()
            else:
                array =np.transpose(array , (3, 2, 0, 1) )
        if pointer.shape != array.shape:
            raise ValueError(F'Pointer shape {pointer.shape} and array shape {array.shape} mismatched' )
        logger.info(F'Initialize PyTorch weight {name} {array.shape}' )
        pointer.data =torch.from_numpy(array )
        tf_weights.pop(name , None )
        tf_weights.pop(name + '/RMSProp' , None )
        tf_weights.pop(name + '/RMSProp_1' , None )
        tf_weights.pop(name + '/ExponentialMovingAverage' , None )
    logger.info(F'Weights not copied to PyTorch model: {", ".join(tf_weights.keys() )}' )
    return model
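# Reproduces TensorFlow's "SAME" padding, which pads asymmetrically (the extra
# pixel goes on the bottom/right when the total padding is odd), unlike
# PyTorch's symmetric Conv2d padding.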
def apply_tf_padding( features: torch.Tensor , conv_layer: nn.Convad ):
    in_height, in_width =features.shape[-2:]
    stride_height, stride_width =conv_layer.stride
    kernel_height, kernel_width =conv_layer.kernel_size
    if in_height % stride_height == 0:
        pad_along_height =max(kernel_height - stride_height , 0 )
    else:
        pad_along_height =max(kernel_height - (in_height % stride_height) , 0 )
    if in_width % stride_width == 0:
        pad_along_width =max(kernel_width - stride_width , 0 )
    else:
        pad_along_width =max(kernel_width - (in_width % stride_width) , 0 )
    pad_left =pad_along_width // 2
    pad_right =pad_along_width - pad_left
    pad_top =pad_along_height // 2
    pad_bottom =pad_along_height - pad_top
    padding =(pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(features , padding , 'constant' , 0.0 )
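# Conv -> (optional) BatchNorm -> (optional) activation block; the building
# brick used for both the stem and the depthwise/pointwise pairs below.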
class UpperCAmelCase__ ( nn.Module ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_ = 1 , A_ = 1 , A_ = False , A_ = True , A_ = True , ) -> None:
super().__init__()
__UpperCamelCase =config
if in_channels % groups != 0:
raise ValueError(f'Input channels ({in_channels}) are not divisible by {groups} groups.' )
if out_channels % groups != 0:
raise ValueError(f'Output channels ({out_channels}) are not divisible by {groups} groups.' )
__UpperCamelCase =0 if config.tf_padding else int((kernel_size - 1) / 2 )
__UpperCamelCase =nn.Convad(
in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , kernel_size=_lowerCamelCase , stride=_lowerCamelCase , padding=_lowerCamelCase , groups=_lowerCamelCase , bias=_lowerCamelCase , padding_mode='zeros' , )
if use_normalization:
__UpperCamelCase =nn.BatchNormad(
num_features=_lowerCamelCase , eps=config.layer_norm_eps , momentum=0.9997 , affine=_lowerCamelCase , track_running_stats=_lowerCamelCase , )
else:
__UpperCamelCase =None
if use_activation:
if isinstance(_lowerCamelCase , _lowerCamelCase ):
__UpperCamelCase =ACTaFN[use_activation]
elif isinstance(config.hidden_act , _lowerCamelCase ):
__UpperCamelCase =ACTaFN[config.hidden_act]
else:
__UpperCamelCase =config.hidden_act
else:
__UpperCamelCase =None
def _a ( self , A_ ) -> torch.Tensor:
if self.config.tf_padding:
__UpperCamelCase =apply_tf_padding(_lowerCamelCase , self.convolution )
__UpperCamelCase =self.convolution(_lowerCamelCase )
if self.normalization is not None:
__UpperCamelCase =self.normalization(_lowerCamelCase )
if self.activation is not None:
__UpperCamelCase =self.activation(_lowerCamelCase )
return features
class UpperCAmelCase__ ( lowerCAmelCase__ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = MobileNetVaConfig
UpperCAmelCase__ : Union[str, Any] = load_tf_weights_in_mobilenet_va
UpperCAmelCase__ : Optional[Any] = "mobilenet_v1"
UpperCAmelCase__ : int = "pixel_values"
UpperCAmelCase__ : Union[str, Any] = False
def _a ( self , A_ ) -> None:
if isinstance(_lowerCamelCase , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_lowerCamelCase , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
_A = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
_A = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." , lowerCAmelCase__ , )
class UpperCAmelCase__ ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self , A_ , A_ = True ) -> str:
super().__init__(_lowerCamelCase )
__UpperCamelCase =config
__UpperCamelCase =32
__UpperCamelCase =max(int(depth * config.depth_multiplier ) , config.min_depth )
__UpperCamelCase =MobileNetVaConvLayer(
_lowerCamelCase , in_channels=config.num_channels , out_channels=_lowerCamelCase , kernel_size=3 , stride=2 , )
__UpperCamelCase =[1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
__UpperCamelCase =nn.ModuleList()
for i in range(13 ):
__UpperCamelCase =out_channels
if strides[i] == 2 or i == 0:
depth *= 2
__UpperCamelCase =max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
_lowerCamelCase , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , kernel_size=3 , stride=strides[i] , groups=_lowerCamelCase , ) )
self.layer.append(
MobileNetVaConvLayer(
_lowerCamelCase , in_channels=_lowerCamelCase , out_channels=_lowerCamelCase , kernel_size=1 , ) )
__UpperCamelCase =nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _a ( self , A_ ) -> Tuple:
raise NotImplementedError
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _a ( self , A_ = None , A_ = None , A_ = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
__UpperCamelCase =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__UpperCamelCase =return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
__UpperCamelCase =self.conv_stem(_lowerCamelCase )
__UpperCamelCase =() if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
__UpperCamelCase =layer_module(_lowerCamelCase )
if output_hidden_states:
__UpperCamelCase =all_hidden_states + (hidden_states,)
__UpperCamelCase =hidden_states
if self.pooler is not None:
__UpperCamelCase =torch.flatten(self.pooler(_lowerCamelCase ) , start_dim=1 )
else:
__UpperCamelCase =None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_lowerCamelCase , pooler_output=_lowerCamelCase , hidden_states=_lowerCamelCase , )
@add_start_docstrings(
"\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase__ , )
class UpperCAmelCase__ ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self , A_ ) -> None:
super().__init__(_lowerCamelCase )
__UpperCamelCase =config.num_labels
__UpperCamelCase =MobileNetVaModel(_lowerCamelCase )
__UpperCamelCase =self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
__UpperCamelCase =nn.Dropout(config.classifier_dropout_prob , inplace=_lowerCamelCase )
__UpperCamelCase =nn.Linear(_lowerCamelCase , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_lowerCamelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_lowerCamelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _a ( self , A_ = None , A_ = None , A_ = None , A_ = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
__UpperCamelCase =return_dict if return_dict is not None else self.config.use_return_dict
__UpperCamelCase =self.mobilenet_va(_lowerCamelCase , output_hidden_states=_lowerCamelCase , return_dict=_lowerCamelCase )
__UpperCamelCase =outputs.pooler_output if return_dict else outputs[1]
__UpperCamelCase =self.classifier(self.dropout(_lowerCamelCase ) )
__UpperCamelCase =None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__UpperCamelCase ='regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__UpperCamelCase ='single_label_classification'
else:
__UpperCamelCase ='multi_label_classification'
if self.config.problem_type == "regression":
__UpperCamelCase =MSELoss()
if self.num_labels == 1:
__UpperCamelCase =loss_fct(logits.squeeze() , labels.squeeze() )
else:
__UpperCamelCase =loss_fct(_lowerCamelCase , _lowerCamelCase )
elif self.config.problem_type == "single_label_classification":
__UpperCamelCase =CrossEntropyLoss()
__UpperCamelCase =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__UpperCamelCase =BCEWithLogitsLoss()
__UpperCamelCase =loss_fct(_lowerCamelCase , _lowerCamelCase )
if not return_dict:
__UpperCamelCase =(logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=_lowerCamelCase , logits=_lowerCamelCase , hidden_states=outputs.hidden_states , )
| 712 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
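# Configuration of the BLIP text encoder/decoder; `encoder_hidden_size` is the
# width of the vision features it cross-attends to.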
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
def __init__( self , A_=30524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1E-12 , A_=0.0 , A_=0.0 , A_=0.02 , A_=30522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> Optional[int]:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__UpperCamelCase =BlipTextConfig(**A_ )
__UpperCamelCase =BlipVisionConfig(**A_ )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
def _a ( cls , A_ , A_ , **A_ ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
| 682 | 0 |
from __future__ import annotations
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "[email protected]"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
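# Collects the absolute URL of every non-empty, non-"#" href found in anchor
# tags, resolved against the domain passed to the constructor.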
class Parser( HTMLParser ):
    """simple docstring"""
    def __init__( self , A_ ) -> None:
        super().__init__()
        self.urls: list[str] =[]
        self.domain =A_
    def handle_starttag( self , tag , attrs ) -> None:
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url =parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name( SCREAMING_SNAKE_CASE__ : str ):
    return ".".join(get_sub_domain_name(SCREAMING_SNAKE_CASE__ ).split('.' )[-2:] )
def get_sub_domain_name( SCREAMING_SNAKE_CASE__ : str ):
    return parse.urlparse(SCREAMING_SNAKE_CASE__ ).netloc
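# Fetches the given page, follows each anchor link once, and scrapes addresses
# of the form <alphanumeric>@<domain>; returns them sorted and de-duplicated.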
def emails_from_url( SCREAMING_SNAKE_CASE__ : str = "https://github.com" ):
    domain =get_domain_name(SCREAMING_SNAKE_CASE__ )
    # Initialize the parser
    parser =Parser(domain )
    try:
        # Open URL
        r =requests.get(SCREAMING_SNAKE_CASE__ )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails =set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read =requests.get(link )
                # Get the valid email.
                emails =re.findall('[a-zA-Z0-9]+@' + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
_A : Optional[int] = emails_from_url('https://github.com')
print(f"""{len(emails)} emails found:""")
print('\n'.join(sorted(emails)))
| 713 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
| 682 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
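# Builds a Funnel model from the JSON config, loads the TF checkpoint weights
# into it, and serializes the resulting state dict with torch.save.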
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path , base_model ):
    config =FunnelConfig.from_json_file(config_file )
    print(F'Building PyTorch model from configuration: {config}' )
    model =FunnelBaseModel(config ) if base_model else FunnelModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
_A = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
| 714 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_A = random.Random()
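# Produces a `shape[0] x shape[1]` nested list of random floats in [0, scale),
# used below as stand-in audio waveforms for the feature extractor.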
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng =global_rng
    values =[]
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) -> Optional[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =min_seq_length
__UpperCamelCase =max_seq_length
__UpperCamelCase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase =padding_value
__UpperCamelCase =sampling_rate
__UpperCamelCase =return_attention_mask
__UpperCamelCase =do_normalize
__UpperCamelCase =feature_size
__UpperCamelCase =chunk_length
__UpperCamelCase =hop_length
def _a ( self ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self , A_=False , A_=False ) -> Any:
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase =[np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self ) -> Optional[int]:
__UpperCamelCase =WhisperFeatureExtractionTester(self )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase =self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =os.path.join(A_ , 'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase =self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
# Tests that all call wrap to encode_plus and batch_encode_plus
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCamelCase =feature_extractor(A_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCamelCase =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCamelCase =[floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCamelCase =np.asarray(A_ )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
__UpperCamelCase =[x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def _a ( self ) -> Dict:
import torch
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =np.random.rand(100 , 32 ).astype(np.floataa )
__UpperCamelCase =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__UpperCamelCase =ds.sort('id' ).select(range(A_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase =torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__UpperCamelCase =self._load_datasamples(1 )
__UpperCamelCase =WhisperFeatureExtractor()
__UpperCamelCase =feature_extractor(A_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1E-4 ) )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =self._load_datasamples(1 )[0]
__UpperCamelCase =((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
__UpperCamelCase =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1E-3 ) )
| 682 | 0 |
def manhattan_distance( point_a: list , point_b: list ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('Both points must be in the same n-dimensional space' )
    return float(sum(abs(a - b ) for a, b in zip(point_a , point_b ) ) )
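# Example: the distance between [1, 1] and [9, 9] is |1 - 9| + |1 - 9| = 16.0.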
def _validate_point( point: list[float] ) -> None:
    if point:
        if isinstance(point , list ):
            for item in point:
                if not isinstance(item , (int, float) ):
                    msg =(
                        'Expected a list of numbers as input, found '
                        F'{type(item ).__name__}'
                    )
                    raise TypeError(msg )
        else:
            msg =F'Expected a list of numbers as input, found {type(point ).__name__}'
            raise TypeError(msg )
    else:
        raise ValueError('Missing an input' )
def manhattan_distance_one_liner( point_a: list , point_b: list ) -> float:
    _validate_point(point_a )
    _validate_point(point_b )
    if len(point_a ) != len(point_b ):
        raise ValueError('Both points must be in the same n-dimensional space' )
    return float(sum(abs(x - y ) for x, y in zip(point_a , point_b ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 715 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
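# Model tester: fixes a small Flaubert configuration and fabricates input ids,
# masks, lengths and labels for every head exercised in the test suite below.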
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ) =config_and_inputs
__UpperCamelCase ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self ) -> Dict:
__UpperCamelCase =TFFlaubertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase =tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
__UpperCamelCase =tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
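# Illustrative sketch (not from the original test): the integration check above
# pins a small corner of the model output against hard-coded reference values
# with `np.allclose` and an absolute tolerance. A minimal standalone form of
# that comparison (relies on the numpy import above; values are supplied by
# the caller):
def _demo_assert_close_slice(output, expected_slice, atol=1e-4):
    # only the leading 3x3 corner is compared, exactly as in the test above
    assert np.allclose(output[:, :3, :3], expected_slice, atol=atol)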
| 682 | 0 |
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =[]
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
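# Illustrative sketch (not from the original script): the (old, new) pairs built
# above are applied by popping each old key from the checkpoint's state dict and
# re-inserting its value under the new key. A toy demonstration with a fake
# two-entry state dict:
def _demo_apply_renames():
    state_dict = {'encoder.deit.norm.weight': 1.0, 'encoder.deit.norm.bias': 0.0}
    renames = [
        ('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
        ('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
    ]
    for old_key, new_key in renames:
        state_dict[new_key] = state_dict.pop(old_key)
    assert 'encoder.layernorm.weight' in state_dict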
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str ):
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__UpperCamelCase =state_dict.pop(F'encoder.deit.blocks.{i}.attn.qkv.weight' )
__UpperCamelCase =in_proj_weight[
: encoder_config.hidden_size, :
]
__UpperCamelCase =in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__UpperCamelCase =in_proj_weight[
-encoder_config.hidden_size :, :
]
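# Illustrative sketch (not from the original script): the function above slices
# a fused attention projection of shape (3 * hidden, hidden) into separate
# query / key / value weights. The same split on a random tensor (the size is
# arbitrary):
def _demo_split_qkv(hidden_size: int = 4):
    in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
    q = in_proj_weight[:hidden_size, :]
    k = in_proj_weight[hidden_size : 2 * hidden_size, :]
    v = in_proj_weight[-hidden_size:, :]
    assert q.shape == k.shape == v.shape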
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =dct.pop(_UpperCAmelCase )
__UpperCamelCase =val
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ):
if "handwritten" in checkpoint_url:
__UpperCamelCase ='https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCamelCase ='https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'
__UpperCamelCase =Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ).convert('RGB' )
return im
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase =ViTConfig(image_size=3_84 , qkv_bias=_UpperCAmelCase )
__UpperCamelCase =TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__UpperCamelCase =7_68
elif "large" in checkpoint_url:
# use ViT-large encoder
__UpperCamelCase =10_24
__UpperCamelCase =40_96
__UpperCamelCase =24
__UpperCamelCase =16
__UpperCamelCase =10_24
else:
raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__UpperCamelCase =False
__UpperCamelCase ='relu'
__UpperCamelCase =10_24
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
# load HuggingFace model
__UpperCamelCase =ViTModel(_UpperCAmelCase , add_pooling_layer=_UpperCAmelCase )
__UpperCamelCase =TrOCRForCausalLM(_UpperCAmelCase )
__UpperCamelCase =VisionEncoderDecoderModel(encoder=_UpperCAmelCase , decoder=_UpperCAmelCase )
model.eval()
# load state_dict of original model, rename some keys
__UpperCamelCase =torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location='cpu' , check_hash=_UpperCAmelCase )['model']
__UpperCamelCase =create_rename_keys(_UpperCAmelCase , _UpperCAmelCase )
for src, dest in rename_keys:
rename_key(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
read_in_q_k_v(_UpperCAmelCase , _UpperCAmelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__UpperCamelCase =state_dict.pop(_UpperCAmelCase )
if key.startswith('decoder' ) and "output_projection" not in key:
__UpperCamelCase =val
else:
__UpperCamelCase =val
# load state dict
model.load_state_dict(_UpperCAmelCase )
# Check outputs on an image
__UpperCamelCase =ViTImageProcessor(size=encoder_config.image_size )
__UpperCamelCase =RobertaTokenizer.from_pretrained('roberta-large' )
__UpperCamelCase =TrOCRProcessor(_UpperCAmelCase , _UpperCAmelCase )
__UpperCamelCase =processor(images=prepare_img(_UpperCAmelCase ) , return_tensors='pt' ).pixel_values
# verify logits
__UpperCamelCase =torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__UpperCamelCase =model(pixel_values=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase )
__UpperCamelCase =outputs.logits
__UpperCamelCase =torch.Size([1, 1, 5_02_65] )
if "trocr-base-handwritten" in checkpoint_url:
__UpperCamelCase =torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
__UpperCamelCase =torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
__UpperCamelCase =torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
__UpperCamelCase =torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _UpperCAmelCase , atol=1E-3 ), "First elements of logits not as expected"
Path(_UpperCAmelCase ).mkdir(exist_ok=_UpperCAmelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_UpperCAmelCase )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_UpperCAmelCase )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_A = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
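# Illustrative usage (not from the original script; the script file name and the
# output directory below are placeholders, the URL is the script's default):
#
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten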
| 716 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)

    # ===== ensurance =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
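# Illustrative sketch (not from the original test): the `iter([...])` plus
# `lambda _: next(f)` idiom above makes a mocked `read` return a sequence of
# values followed by a terminator. `unittest.mock` can express the same thing
# directly by passing a list as `side_effect`:
def _demo_side_effect():
    recv = Mock(side_effect=[b'chunk', None])
    assert recv() == b'chunk'
    assert recv() is None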
| 682 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
set_seed(770)
_A = {
'c_attn': 'att_proj',
'c_proj': 'out_proj',
'c_fc': 'in_proj',
'transformer.': '',
'h.': 'layers.',
'ln_1': 'layernorm_1',
'ln_2': 'layernorm_2',
'ln_f': 'layernorm_final',
'wpe': 'position_embeds_layer',
'wte': 'input_embeds_layer',
}
_A = {
'text_small': {
'repo_id': 'suno/bark',
'file_name': 'text.pt',
},
'coarse_small': {
'repo_id': 'suno/bark',
'file_name': 'coarse.pt',
},
'fine_small': {
'repo_id': 'suno/bark',
'file_name': 'fine.pt',
},
'text': {
'repo_id': 'suno/bark',
'file_name': 'text_2.pt',
},
'coarse': {
'repo_id': 'suno/bark',
'file_name': 'coarse_2.pt',
},
'fine': {
'repo_id': 'suno/bark',
'file_name': 'fine_2.pt',
},
}
_A = os.path.dirname(os.path.abspath(__file__))
_A = os.path.join(os.path.expanduser('~'), '.cache')
_A = os.path.join(os.getenv('XDG_CACHE_HOME', default_cache_dir), 'suno', 'bark_v0')
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict=False ):
__UpperCamelCase =model_type
if use_small:
key += "_small"
return os.path.join(SCREAMING_SNAKE_CASE__ , REMOTE_MODEL_PATHS[key]['file_name'] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any ):
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
hf_hub_download(repo_id=SCREAMING_SNAKE_CASE__ , filename=SCREAMING_SNAKE_CASE__ , local_dir=SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict=False , SCREAMING_SNAKE_CASE__ : Dict="text" ):
if model_type == "text":
__UpperCamelCase =BarkSemanticModel
__UpperCamelCase =BarkSemanticConfig
__UpperCamelCase =BarkSemanticGenerationConfig
elif model_type == "coarse":
__UpperCamelCase =BarkCoarseModel
__UpperCamelCase =BarkCoarseConfig
__UpperCamelCase =BarkCoarseGenerationConfig
elif model_type == "fine":
__UpperCamelCase =BarkFineModel
__UpperCamelCase =BarkFineConfig
__UpperCamelCase =BarkFineGenerationConfig
else:
raise NotImplementedError()
__UpperCamelCase =F'{model_type}_small' if use_small else model_type
__UpperCamelCase =REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
logger.info(F'{model_type} model not found, downloading into `{CACHE_DIR}`.' )
_download(model_info['repo_id'] , model_info['file_name'] )
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location=SCREAMING_SNAKE_CASE__ )
# this is a hack
__UpperCamelCase =checkpoint['model_args']
if "input_vocab_size" not in model_args:
__UpperCamelCase =model_args['vocab_size']
__UpperCamelCase =model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
__UpperCamelCase =model_args.pop('n_head' )
__UpperCamelCase =model_args.pop('n_embd' )
__UpperCamelCase =model_args.pop('n_layer' )
__UpperCamelCase =ConfigClass(**checkpoint['model_args'] )
__UpperCamelCase =ModelClass(config=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =GenerationConfigClass()
__UpperCamelCase =model_generation_config
__UpperCamelCase =checkpoint['model']
# fixup checkpoint
__UpperCamelCase ='_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(SCREAMING_SNAKE_CASE__ ):
# replace part of the key with corresponding layer name in HF implementation
__UpperCamelCase =k[len(SCREAMING_SNAKE_CASE__ ) :]
for old_layer_name in new_layer_name_dict:
__UpperCamelCase =new_k.replace(SCREAMING_SNAKE_CASE__ , new_layer_name_dict[old_layer_name] )
__UpperCamelCase =state_dict.pop(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =set(state_dict.keys() ) - set(model.state_dict().keys() )
__UpperCamelCase ={k for k in extra_keys if not k.endswith('.attn.bias' )}
__UpperCamelCase =set(model.state_dict().keys() ) - set(state_dict.keys() )
__UpperCamelCase ={k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(SCREAMING_SNAKE_CASE__ ) != 0:
raise ValueError(F'extra keys found: {extra_keys}' )
if len(SCREAMING_SNAKE_CASE__ ) != 0:
raise ValueError(F'missing keys: {missing_keys}' )
model.load_state_dict(SCREAMING_SNAKE_CASE__ , strict=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =checkpoint['best_val_loss'].item()
logger.info(F'model loaded: {round(n_params/1E6 , 1 )}M params, {round(SCREAMING_SNAKE_CASE__ , 3 )} loss' )
model.eval()
model.to(SCREAMING_SNAKE_CASE__ )
del checkpoint, state_dict
return model
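# Illustrative sketch (not from the original script): the checkpoint fix-up
# above strips the torch.compile prefix ("_orig_mod.") and rewrites GPT-style
# layer names to their HF equivalents. The same two-step rewrite on a toy dict:
def _demo_fixup_keys():
    rename_map = {'c_attn': 'att_proj', 'transformer.': ''}
    state_dict = {'_orig_mod.transformer.c_attn.weight': 0.5}
    for k in list(state_dict):
        new_k = k[len('_orig_mod.'):] if k.startswith('_orig_mod.') else k
        for old, new in rename_map.items():
            new_k = new_k.replace(old, new)
        state_dict[new_k] = state_dict.pop(k)
    assert 'att_proj.weight' in state_dict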
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[int]=False , SCREAMING_SNAKE_CASE__ : Union[str, Any]="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
__UpperCamelCase ='cpu' # do conversion on cpu
__UpperCamelCase =_get_ckpt_path(SCREAMING_SNAKE_CASE__ , use_small=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =_load_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , model_type=SCREAMING_SNAKE_CASE__ , use_small=SCREAMING_SNAKE_CASE__ )
# load bark initial model
__UpperCamelCase =_bark_load_model(SCREAMING_SNAKE_CASE__ , 'cpu' , model_type=SCREAMING_SNAKE_CASE__ , use_small=SCREAMING_SNAKE_CASE__ )
if model_type == "text":
__UpperCamelCase =bark_model['model']
if model.num_parameters(exclude_embeddings=SCREAMING_SNAKE_CASE__ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
__UpperCamelCase =5
__UpperCamelCase =10
if model_type in ["text", "coarse"]:
__UpperCamelCase =torch.randint(2_56 , (batch_size, sequence_length) , dtype=torch.int )
__UpperCamelCase =bark_model(SCREAMING_SNAKE_CASE__ )[0]
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ )
# take last logits
__UpperCamelCase =output_new_model_total.logits[:, [-1], :]
else:
__UpperCamelCase =3
__UpperCamelCase =8
__UpperCamelCase =torch.randint(2_56 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =bark_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('initial and new outputs are not equal' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
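# Illustrative sketch (not from the original script): the conversion above only
# accepts the new model when its logits agree with the reference implementation
# within a 1e-3 tolerance. A generic form of that equivalence gate, assuming
# `old_logits` and `new_logits` are torch tensors:
def _demo_outputs_match(old_logits, new_logits, tol=1e-3):
    if new_logits.shape != old_logits.shape:
        raise ValueError("outputs don't have the same shape")
    return (new_logits - old_logits).abs().max().item() <= tol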
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , ):
__UpperCamelCase =os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =BarkSemanticConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE__ , 'config.json' ) )
__UpperCamelCase =BarkCoarseConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE__ , 'config.json' ) )
__UpperCamelCase =BarkFineConfig.from_pretrained(os.path.join(SCREAMING_SNAKE_CASE__ , 'config.json' ) )
__UpperCamelCase =EncodecConfig.from_pretrained('facebook/encodec_24khz' )
__UpperCamelCase =BarkSemanticModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =BarkCoarseModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =BarkFineModel.from_pretrained(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =EncodecModel.from_pretrained('facebook/encodec_24khz' )
__UpperCamelCase =BarkConfig.from_sub_model_configs(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
__UpperCamelCase =BarkModel(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =semantic
__UpperCamelCase =coarseAcoustic
__UpperCamelCase =fineAcoustic
__UpperCamelCase =codec
__UpperCamelCase =bark_generation_config
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
bark.save_pretrained(SCREAMING_SNAKE_CASE__ , repo_id=SCREAMING_SNAKE_CASE__ , push_to_hub=SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument('model_type', type=str, help='text, coarse or fine.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--is_small', action='store_true', help='convert the small version instead of the large.')
_A = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 717 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` via the secant method, seeded with x0 and x1."""
    x_n = x0
    x_n1 = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        # secant update: follow the chord through (x_n, f(x_n)) and (x_n1, f(x_n1))
        x_n2 = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    # the real root of x^3 - 2x - 5 = 0 lies near 2.0945515
    print(intersection(f, 3, 3.5))
| 682 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {'vocab_file': 'sentencepiece.bpe.model'}
_A = {
'vocab_file': {
'facebook/mbart-large-50-one-to-many-mmt': (
'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
),
}
}
_A = {
'facebook/mbart-large-50-one-to-many-mmt': 1024,
}
# fmt: off
_A = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = ["input_ids", "attention_mask"]
UpperCAmelCase__ : Union[str, Any] = []
UpperCAmelCase__ : List[Any] = []
def __init__( self , A_ , A_=None , A_=None , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_ = None , **A_ , ) -> None:
# Mask token behave like a normal word, i.e. include the space before it
__UpperCamelCase =AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__A , tgt_lang=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
__UpperCamelCase =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCamelCase ={'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCamelCase =1
__UpperCamelCase =len(self.sp_model )
__UpperCamelCase ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__A )
}
__UpperCamelCase ={v: k for k, v in self.lang_code_to_id.items()}
__UpperCamelCase =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__UpperCamelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
__UpperCamelCase =src_lang if src_lang is not None else 'en_XX'
__UpperCamelCase =self.lang_code_to_id[self._src_lang]
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _a ( self ) -> int:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , A_ ) -> None:
__UpperCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Dict:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> None:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self ) -> Dict:
__UpperCamelCase ={self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(__A , out_type=__A )
def _a ( self , A_ ) -> int:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCamelCase =self.sp_model.PieceToId(__A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _a ( self , A_ ) -> str:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _a ( self , A_ ) -> Tuple:
__UpperCamelCase =[]
__UpperCamelCase =''
__UpperCamelCase =False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
__UpperCamelCase =True
__UpperCamelCase =[]
else:
current_sub_tokens.append(__A )
__UpperCamelCase =False
out_string += self.sp_model.decode(__A )
return out_string.strip()
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
if not os.path.isdir(__A ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase =os.path.join(
__A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__A )) + suffix_ones
return prefix_ones + ([0] * len(__A )) + ([0] * len(__A )) + suffix_ones
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self , A_ , A_ , A_ , A_ , **A_ ) -> Optional[int]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
__UpperCamelCase =self(__A , add_special_tokens=__A , return_tensors=__A , **__A )
__UpperCamelCase =self.convert_tokens_to_ids(__A )
__UpperCamelCase =tgt_lang_id
return inputs
def _a ( self , A_ , A_ = "en_XX" , A_ = None , A_ = "ro_RO" , **A_ , ) -> BatchEncoding:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
return super().prepare_seqaseq_batch(__A , __A , **__A )
def _a ( self ) -> int:
return self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.lang_code_to_id[src_lang]
__UpperCamelCase =[self.cur_lang_code_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.lang_code_to_id[tgt_lang]
__UpperCamelCase =[self.cur_lang_code_id]
__UpperCamelCase =[self.eos_token_id]
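# Illustrative usage sketch (not from the original module): in the upstream
# transformers API this tokenizer is MBart50Tokenizer; it prefixes inputs with
# the source-language code and switches to the target-language code when
# preparing decoding targets. A hypothetical translation setup:
#
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       'facebook/mbart-large-50-one-to-many-mmt', src_lang='en_XX', tgt_lang='ro_RO'
#   )
#   inputs = tokenizer('Hello world', return_tensors='pt')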
| 718 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_A = logging.getLogger(__name__)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> int:
__UpperCamelCase =False
def _a ( self , A_ , A_ , A_ , A_ ) -> List[Any]:
if not self.initialized:
__UpperCamelCase =RagRetriever(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =True
def _a ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def _a ( self , A_ , A_ ) -> Dict:
__UpperCamelCase , __UpperCamelCase =self.retriever._main_retrieve(A_ , A_ )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_=None ) -> Dict:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def _a ( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self , A_ , A_ ) -> Optional[int]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase =self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase =ray.get(random_worker.retrieve.remote(A_ , A_ ) )
else:
__UpperCamelCase , __UpperCamelCase =self._main_retrieve(A_ , A_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ )
@classmethod
def _a ( cls , A_ , A_=None , **A_ ) -> List[str]:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def _a ( cls , A_ , A_ , A_=None , **A_ ) -> str:
__UpperCamelCase =kwargs.pop('config' , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
__UpperCamelCase =RagTokenizer.from_pretrained(A_ , config=A_ )
__UpperCamelCase =rag_tokenizer.question_encoder
__UpperCamelCase =rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase ='custom'
__UpperCamelCase =CustomHFIndex(config.retrieval_vector_size , A_ )
else:
__UpperCamelCase =cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
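# Illustrative sketch (not from the original module): the distributed retriever
# above fans requests out to Ray actors with `.remote(...)` and blocks on
# `ray.get`. The bare shape of that pattern, with a toy actor:
#
#   import ray
#
#   @ray.remote
#   class Worker:
#       def retrieve(self, query):
#           return f'docs for {query}'
#
#   workers = [Worker.remote() for _ in range(2)]
#   print(ray.get([w.retrieve.remote('q') for w in workers]))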
| 682 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_A = logging.get_logger(__name__)
_A = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
_A = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_A = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_A = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
_A = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
_A = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
_A = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
_A = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
_A = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
_A = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
_A = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
_A = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
_A = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
_A = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
_A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_A = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : int = FLAX_MODEL_MAPPING
_A = auto_class_update(FlaxAutoModel)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Any = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_A = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_A = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Any = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_A = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : int = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_A = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_A = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_A = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_A = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_A = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_A = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_A = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : str = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_A = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class UpperCAmelCase__ ( _BaseAutoModelClass ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_A = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
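# Illustrative usage sketch (not from the original module): each auto class
# resolves a checkpoint's config type through its lazy config-to-model mapping
# and instantiates the matching Flax model. In the upstream transformers API
# this looks like (the checkpoint name is just an example):
#
#   model = FlaxAutoModel.from_pretrained('bert-base-cased')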
| 719 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__UpperCamelCase =(self.image_size // 32) ** 2
__UpperCamelCase =num_patches + 1
def _a ( self ) -> str:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.type_sequence_label_size
__UpperCamelCase =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__UpperCamelCase =[f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' )
__UpperCamelCase =model(**A_ )
__UpperCamelCase =outputs.logits
# model predicts one of the 1000 ImageNet classes
__UpperCamelCase =logits.argmax(-1 ).item()
self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
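# Illustrative sketch (not from the original tests): the accelerate test above
# follows the standard classification inference recipe -- preprocess, forward,
# argmax over the logits, then map the class id to a label. In the upstream API
# the label mapping is `config.id2label` (renamed `idalabel` in this copy):
#
#   processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384')
#   model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384')
#   inputs = processor(images=prepare_img(), return_tensors='pt')
#   predicted_idx = model(**inputs).logits.argmax(-1).item()
#   print(model.config.id2label[predicted_idx])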
| 682 | 0 |
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(
    img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    # compute the 2x3 affine matrix mapping pts1 onto pts2, then warp the image
    matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, matrix, (rows, cols))
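# Illustrative sketch (not part of the original demo): `cv2.getAffineTransform`
# maps three source points onto three destination points; applying the helper
# above to a tiny synthetic image preserves the output shape:
def _demo_affine():
    img = np.zeros((10, 10), dtype=np.uint8)
    src = np.array([[0, 0], [9, 0], [0, 9]], np.float32)
    dst = np.array([[0, 0], [9, 1], [1, 9]], np.float32)
    assert get_rotation(img, src, dst, 10, 10).shape == (10, 10)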
if __name__ == "__main__":
# read original image
_A = cva.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
_A = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
# get image shape
_A = gray_img.shape
# set different points to rotate image
_A = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
_A = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
_A = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
_A = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
# add all rotated images in a list
_A = [
gray_img,
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
get_rotation(gray_img, ptsa, ptsa, img_rows, img_cols),
]
# plot different image rotations
_A = plt.figure(1)
_A = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
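# For intuition: cva.getAffineTransform above solves for the 2x3 matrix M that
# maps three source points onto three destination points. A minimal NumPy-only
# sketch of the same solve (assuming the three source points are not
# collinear); it illustrates the math rather than replacing the OpenCV call.
import numpy as np

def get_affine_matrix(src_pts: np.ndarray, dst_pts: np.ndarray) -> np.ndarray:
    homogeneous = np.hstack([src_pts, np.ones((3, 1))])  # rows of [x, y, 1]
    # Solve homogeneous @ M.T = dst_pts for the transposed (3, 2) matrix.
    return np.linalg.solve(homogeneous, dst_pts).T       # shape (2, 3)

_src = np.array([[50, 50], [200, 50], [50, 200]], dtype=np.float64)
_dst = np.array([[10, 100], [200, 50], [100, 250]], dtype=np.float64)
assert get_affine_matrix(_src, _dst).shape == (2, 3)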
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : LevitConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ):
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__UpperCamelCase =timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 1_92:
__UpperCamelCase =timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 2_56:
__UpperCamelCase =timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 3_84:
__UpperCamelCase =timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE__ )
from_model.eval()
__UpperCamelCase =LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
__UpperCamelCase =OrderedDict()
__UpperCamelCase =from_model.state_dict()
__UpperCamelCase =list(from_model.state_dict().keys() )
__UpperCamelCase =list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.randn((2, 3, 2_24, 2_24) )
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original one."
__UpperCamelCase =name
print(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__UpperCamelCase =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ):
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__UpperCamelCase ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
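# The converter above copies weights positionally: it assumes the timm model
# and the HF model enumerate their state-dict keys in the same order, then
# maps key i of one onto key i of the other. A minimal sketch of that idea on
# two structurally identical toy modules (layer shapes are placeholders):
from collections import OrderedDict

import torch
from torch import nn

src_model = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
dst_model = nn.Sequential(nn.Linear(4, 8), nn.Linear(8, 2))
copied = OrderedDict(
    (dst_key, src_model.state_dict()[src_key])
    for src_key, dst_key in zip(src_model.state_dict(), dst_model.state_dict())
)
dst_model.load_state_dict(copied)
x = torch.randn(2, 4)
assert torch.allclose(src_model(x), dst_model(x))  # same weights, same outputs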
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_A = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class UpperCAmelCase__ :
UpperCAmelCase__ : Any = PegasusConfig
UpperCAmelCase__ : List[Any] = {}
UpperCAmelCase__ : int = 'gelu'
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=False , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_=0.1 , A_=0.1 , A_=20 , A_=2 , A_=1 , A_=0 , ) -> Union[str, Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =eos_token_id
__UpperCamelCase =pad_token_id
__UpperCamelCase =bos_token_id
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
__UpperCamelCase =np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
__UpperCamelCase =np.concatenate([input_ids, eos_tensor] , axis=1 )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__UpperCamelCase =prepare_pegasus_inputs_dict(_a , _a , _a )
return config, inputs_dict
def _a ( self , A_ , A_ , A_ ) -> List[str]:
__UpperCamelCase =20
__UpperCamelCase =model_class_name(_a )
__UpperCamelCase =model.encode(inputs_dict['input_ids'] )
__UpperCamelCase , __UpperCamelCase =(
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__UpperCamelCase =model.init_cache(decoder_input_ids.shape[0] , _a , _a )
__UpperCamelCase =jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
__UpperCamelCase =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase =model.decode(
decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , )
__UpperCamelCase =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase =model.decode(
decoder_input_ids[:, -1:] , _a , decoder_attention_mask=_a , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_a , )
__UpperCamelCase =model.decode(_a , _a )
__UpperCamelCase =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
def _a ( self , A_ , A_ , A_ ) -> Tuple:
__UpperCamelCase =20
__UpperCamelCase =model_class_name(_a )
__UpperCamelCase =model.encode(inputs_dict['input_ids'] )
__UpperCamelCase , __UpperCamelCase =(
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
__UpperCamelCase =jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
__UpperCamelCase =model.init_cache(decoder_input_ids.shape[0] , _a , _a )
__UpperCamelCase =jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
__UpperCamelCase =model.decode(
decoder_input_ids[:, :-1] , _a , decoder_attention_mask=_a , past_key_values=_a , decoder_position_ids=_a , )
__UpperCamelCase =jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
__UpperCamelCase =model.decode(
decoder_input_ids[:, -1:] , _a , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_a , decoder_position_ids=_a , )
__UpperCamelCase =model.decode(_a , _a , decoder_attention_mask=_a )
__UpperCamelCase =np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f'Max diff is {diff}' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Optional[int]=None , ):
if attention_mask is None:
__UpperCamelCase =np.not_equal(lowerCAmelCase__ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
__UpperCamelCase =np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class UpperCAmelCase__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCAmelCase__ : Tuple = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
UpperCAmelCase__ : Dict = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : int = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[int]:
__UpperCamelCase =FlaxPegasusModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=_a )
def _a ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> Optional[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_a , _a , _a )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_a , _a , _a )
def _a ( self ) -> Tuple:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase =self._prepare_for_class(_a , _a )
__UpperCamelCase =model_class(_a )
@jax.jit
def encode_jitted(A_ , A_=None , **A_ ):
return model.encode(input_ids=_a , attention_mask=_a )
with self.subTest('JIT Enabled' ):
__UpperCamelCase =encode_jitted(**_a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__UpperCamelCase =encode_jitted(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) )
for jitted_output, output in zip(_a , _a ):
self.assertEqual(jitted_output.shape , output.shape )
def _a ( self ) -> str:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__UpperCamelCase =model_class(_a )
__UpperCamelCase =model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
__UpperCamelCase ={
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(A_ , A_ , A_ ):
return model.decode(
decoder_input_ids=_a , decoder_attention_mask=_a , encoder_outputs=_a , )
with self.subTest('JIT Enabled' ):
__UpperCamelCase =decode_jitted(**_a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__UpperCamelCase =decode_jitted(**_a ).to_tuple()
self.assertEqual(len(_a ) , len(_a ) )
for jitted_output, output in zip(_a , _a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _a ( self ) -> str:
for model_class_name in self.all_model_classes:
__UpperCamelCase =model_class_name.from_pretrained('google/pegasus-large' , from_pt=_a )
__UpperCamelCase =np.ones((1, 1) )
__UpperCamelCase =model(_a )
self.assertIsNotNone(_a )
@slow
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
__UpperCamelCase =PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
__UpperCamelCase =[
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
__UpperCamelCase =[
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
__UpperCamelCase =tokenizer(_a , return_tensors='np' , truncation=_a , max_length=512 , padding=_a )
__UpperCamelCase =model.generate(**_a , num_beams=2 ).sequences
__UpperCamelCase =tokenizer.batch_decode(_a , skip_special_tokens=_a )
assert tgt_text == decoded
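# The input-preparation helper above derives attention masks from pad tokens,
# always keeping the first decoder position visible. A standalone NumPy
# version of that masking rule (pad_token_id=0 is an arbitrary demo value):
import numpy as np

def build_masks(input_ids, decoder_input_ids, pad_token_id=0):
    attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)
    decoder_attention_mask = np.concatenate(
        [
            np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),  # position 0 always attended
            np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
        ],
        axis=-1,
    )
    return attention_mask, decoder_attention_mask

enc_mask, dec_mask = build_masks(np.array([[5, 7, 0, 0]]), np.array([[0, 9, 4, 0]]))
assert enc_mask.tolist() == [[1, 1, 0, 0]] and dec_mask.tolist() == [[1, 1, 1, 0]]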
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Any:
__UpperCamelCase ='laion/clap-htsat-unfused'
__UpperCamelCase =tempfile.mkdtemp()
def _a ( self , **A_ ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **A_ )
def _a ( self , **A_ ) -> Dict:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **A_ )
def _a ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> str:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> int:
__UpperCamelCase =ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__UpperCamelCase =self.get_feature_extractor(do_normalize=A_ , padding_value=1.0 )
__UpperCamelCase =ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =floats_list((3, 1000) )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' )
__UpperCamelCase =processor(audios=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase ='This is a test string'
__UpperCamelCase =processor(text=A_ )
__UpperCamelCase =tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase =processor.batch_decode(A_ )
__UpperCamelCase =tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
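# A processor such as ClapProcessor is at heart a small dispatcher: text goes
# to the tokenizer, raw audio to the feature extractor, and the outputs are
# merged when both are given. A toy sketch of that pattern with stand-in
# callables (the real class adds tensor handling and validation on top):
class ToyProcessor:
    def __init__(self, tokenizer, feature_extractor):
        self.tokenizer = tokenizer
        self.feature_extractor = feature_extractor

    def __call__(self, text=None, audios=None):
        if text is None and audios is None:
            raise ValueError('You have to specify either text or audios.')
        outputs = {}
        if text is not None:
            outputs.update(self.tokenizer(text))
        if audios is not None:
            outputs.update(self.feature_extractor(audios))
        return outputs

toy = ToyProcessor(
    tokenizer=lambda t: {'input_ids': [len(word) for word in t.split()]},
    feature_extractor=lambda a: {'input_features': [sum(a) / len(a)]},
)
assert set(toy(text='a test string', audios=[0.1, 0.2, 0.3])) == {'input_ids', 'input_features'}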
'''simple docstring'''
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =1
for i in range(1 , num + 1 ):
fact *= i
return fact
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =0
while number > 0:
__UpperCamelCase =number % 10
sum_of_digits += last_digit
__UpperCamelCase =number // 10 # Removing the last_digit from the given number
return sum_of_digits
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int = 1_00 ):
__UpperCamelCase =factorial(_lowerCamelCase )
__UpperCamelCase =split_and_add(_lowerCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input('Enter the Number: ').strip())))
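# Cross-check: Python integers are arbitrary precision, so the same result can
# be computed directly with the standard library, which makes a handy sanity
# test for the hand-rolled factorial and digit-sum helpers above.
import math

def solution_stdlib(num: int = 1_00) -> int:
    return sum(int(digit) for digit in str(math.factorial(num)))

assert solution_stdlib(10) == 27  # 10! = 3628800 and 3+6+2+8+8+0+0 == 27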
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
if subparsers is not None:
__UpperCamelCase =subparsers.add_parser('test' )
else:
__UpperCamelCase =argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
return parser
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase =os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
__UpperCamelCase =script_name
else:
__UpperCamelCase =F'--config_file={args.config_file} {script_name}'
__UpperCamelCase =['accelerate-launch'] + test_args.split()
__UpperCamelCase =execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def _UpperCAmelCase ( ):
__UpperCamelCase =test_command_parser()
__UpperCamelCase =parser.parse_args()
test_command(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
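# The `subparsers is not None` branch above lets the same parser builder work
# both standalone and as a sub-command of a larger CLI. The dispatch idiom it
# enables is `set_defaults(func=...)`: each sub-command registers its handler
# and the caller simply invokes `args.func(args)`. A toy sketch (the command
# and option names below are made up for illustration):
import argparse

def greet_command(args):
    print(f'hello, {args.name}')

toy_parser = argparse.ArgumentParser('toy')
toy_subparsers = toy_parser.add_subparsers()
greet_parser = toy_subparsers.add_parser('greet')
greet_parser.add_argument('--name', default='world')
greet_parser.set_defaults(func=greet_command)

toy_args = toy_parser.parse_args(['greet', '--name', 'accelerate'])
toy_args.func(toy_args)  # prints: hello, accelerate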
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
_A = """base_with_context"""
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] ):
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=SCREAMING_SNAKE_CASE__ )
for lyr_num, lyr in enumerate(model.encoders ):
__UpperCamelCase =weights[F'layers_{lyr_num}']
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
__UpperCamelCase =ly_weight['attention']
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=SCREAMING_SNAKE_CASE__ )
for lyr_num, lyr in enumerate(model.encoders ):
__UpperCamelCase =weights[F'layers_{lyr_num}']
__UpperCamelCase =ly_weight['attention']
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
__UpperCamelCase =weights[F'layers_{lyr_num}']
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
__UpperCamelCase =ly_weight['self_attention']
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase =ly_weight['MultiHeadDotProductAttention_0']
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
__UpperCamelCase =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
__UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =checkpoints.load_tax_checkpoint(args.checkpoint_path )
__UpperCamelCase =jnp.tree_util.tree_map(onp.array , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
'from __gin__ import dynamic_registration',
'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
]
__UpperCamelCase =os.path.join(args.checkpoint_path , '..' , 'config.gin' )
__UpperCamelCase =inference.parse_training_gin_file(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =inference.InferenceModel(args.checkpoint_path , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
__UpperCamelCase =SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
__UpperCamelCase =SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
__UpperCamelCase =TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
__UpperCamelCase =load_notes_encoder(ta_checkpoint['target']['token_encoder'] , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =load_decoder(ta_checkpoint['target']['decoder'] , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
__UpperCamelCase =SpectrogramDiffusionPipeline(
notes_encoder=SCREAMING_SNAKE_CASE__ , continuous_encoder=SCREAMING_SNAKE_CASE__ , decoder=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , melgan=SCREAMING_SNAKE_CASE__ , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=f"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
_A = parser.parse_args()
main(args)
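# Nearly every line of the loaders above applies the same recipe: wrap a Flax
# array in torch.FloatTensor, transposing `kernel` weights because Flax dense
# layers store kernels as (in_features, out_features) while torch.nn.Linear
# expects (out_features, in_features). A minimal sketch of that conversion for
# one dense layer (the shapes here are arbitrary placeholders):
import numpy as onp
import torch
from torch import nn

flax_kernel = onp.random.randn(16, 4).astype(onp.float32)  # (in, out)
layer = nn.Linear(16, 4, bias=False)
layer.weight = nn.Parameter(torch.FloatTensor(flax_kernel.T))  # -> (out, in)

x = onp.random.randn(1, 16).astype(onp.float32)
reference = x @ flax_kernel
converted = layer(torch.from_numpy(x)).detach().numpy()
assert onp.allclose(reference, converted, atol=1E-4)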
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
return flax_params
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ={}
__UpperCamelCase ={
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
__UpperCamelCase ={
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__UpperCamelCase ='.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flax_dict[key]
__UpperCamelCase ={}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__UpperCamelCase =torch.from_numpy(converted_dict[key].T )
else:
__UpperCamelCase =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : str=False ):
__UpperCamelCase =get_flax_param(SCREAMING_SNAKE_CASE__ )
if not use_large:
__UpperCamelCase =PixaStructVisionConfig()
__UpperCamelCase =PixaStructTextConfig()
else:
__UpperCamelCase =PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
__UpperCamelCase =PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
__UpperCamelCase =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =PixaStructForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =rename_and_convert_flax_params(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
__UpperCamelCase =PixaStructImageProcessor()
__UpperCamelCase =PixaStructProcessor(image_processor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
if use_large:
__UpperCamelCase =40_96
__UpperCamelCase =True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
print('Model saved in {}'.format(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
parser.add_argument('--is_vqa', action='store_true', help='Use large model.')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
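# The renaming pass above combines two ingredients: literal substring
# replacement driven by a mapping table, and a regex that rewrites layer
# indices (`layers_7` -> `layer.7`). Both are easy to exercise in isolation;
# the two-entry table below is a trimmed-down stand-in for the real mapping.
import re

TOY_MAPPING = {'kernel': 'weight', 'encoder_norm': 'layernorm'}

def rename_key(key: str) -> str:
    for old, new in TOY_MAPPING.items():
        key = key.replace(old, new)
    return re.sub(r'layers_(\d+)', r'layer.\1', key)

assert rename_key('encoder.layers_7.attention.kernel') == 'encoder.layer.7.attention.weight'
assert rename_key('encoder_norm.scale') == 'layernorm.scale'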
from __future__ import annotations
import bisect
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = -1 ):
if hi < 0:
__UpperCamelCase =len(SCREAMING_SNAKE_CASE_ )
while lo < hi:
__UpperCamelCase =lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__UpperCamelCase =mid + 1
else:
__UpperCamelCase =mid
return lo
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = -1 ):
if hi < 0:
__UpperCamelCase =len(SCREAMING_SNAKE_CASE_ )
while lo < hi:
__UpperCamelCase =lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__UpperCamelCase =mid + 1
else:
__UpperCamelCase =mid
return lo
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = -1 ):
sorted_collection.insert(bisect_left(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = -1 ):
sorted_collection.insert(bisect_right(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =0
__UpperCamelCase =len(SCREAMING_SNAKE_CASE_ ) - 1
while left <= right:
__UpperCamelCase =left + (right - left) // 2
__UpperCamelCase =sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__UpperCamelCase =midpoint - 1
else:
__UpperCamelCase =midpoint + 1
return None
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =bisect.bisect_left(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if index != len(SCREAMING_SNAKE_CASE_ ) and sorted_collection[index] == item:
return index
return None
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
if right < left:
return None
__UpperCamelCase =left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , midpoint - 1 )
else:
return binary_search_by_recursion(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , midpoint + 1 , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_A = input('Enter numbers separated by comma:\n').strip()
_A = sorted(int(item) for item in user_input.split(','))
_A = int(input('Enter a single number to be found in the list:\n'))
_A = binary_search(collection, target)
if result is None:
print(f"""{target} was not found in {collection}.""")
else:
print(f"""{target} was found at position {result} in {collection}.""")
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_A = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
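# _LazyModule defers importing heavy submodules until an attribute is first
# touched. A stripped-down sketch of the same idea (this toy class is not the
# actual transformers implementation; the attribute map below is made up and
# pulls from the stdlib so the sketch actually runs):
import importlib
import types

class ToyLazyModule(types.ModuleType):
    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        if attr in self._attr_to_module:
            value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
            setattr(self, attr, value)  # cache so the import runs only once
            return value
        raise AttributeError(attr)

lazy = ToyLazyModule('toy', {'sqrt': 'math'})
assert lazy.sqrt(9.0) == 3.0  # `math` is imported only at this point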
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCAmelCase__ :
"""simple docstring"""
UpperCAmelCase__ : List[Any] = XGLMConfig
UpperCAmelCase__ : List[str] = {}
UpperCAmelCase__ : Optional[int] = "gelu"
def __init__( self , A_ , A_=14 , A_=7 , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=0.02 , ) -> Union[str, Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_input_mask
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =d_model
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =ffn_dim
__UpperCamelCase =activation_function
__UpperCamelCase =activation_dropout
__UpperCamelCase =attention_dropout
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =initializer_range
__UpperCamelCase =None
__UpperCamelCase =0
__UpperCamelCase =2
__UpperCamelCase =1
def _a ( self ) -> str:
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =self.get_config()
__UpperCamelCase =floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _a ( self ) -> Optional[Any]:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=A_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=A_ , )
def _a ( self ) -> Any:
__UpperCamelCase =self.prepare_config_and_inputs()
        (__UpperCamelCase) =config_and_inputs
__UpperCamelCase ={
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCAmelCase__ : Optional[int] = (TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCAmelCase__ : Dict = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCAmelCase__ : str = False
UpperCAmelCase__ : List[Any] = False
UpperCAmelCase__ : Union[str, Any] = False
def _a ( self ) -> Any:
__UpperCamelCase =TFXGLMModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , n_embd=37 )
def _a ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@slow
def _a ( self ) -> Dict:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFXGLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def _a ( self ) -> int:
super().test_resize_token_embeddings()
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self , A_=True ) -> Any:
__UpperCamelCase =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase =tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__UpperCamelCase =[2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
__UpperCamelCase =model.generate(A_ , do_sample=A_ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , A_ )
@slow
def _a ( self ) -> Tuple:
__UpperCamelCase =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__UpperCamelCase =tokenizer('Today is a nice day and' , return_tensors='tf' )
__UpperCamelCase =tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__UpperCamelCase =model.generate(A_ , do_sample=A_ , seed=[7, 0] )
__UpperCamelCase =tokenizer.decode(output_ids[0] , skip_special_tokens=A_ )
__UpperCamelCase =(
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase =XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__UpperCamelCase ='left'
# use different length sentences to test batching
__UpperCamelCase =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__UpperCamelCase =tokenizer(A_ , return_tensors='tf' , padding=A_ )
__UpperCamelCase =inputs['input_ids']
__UpperCamelCase =model.generate(input_ids=A_ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
__UpperCamelCase =tokenizer(sentences[0] , return_tensors='tf' ).input_ids
__UpperCamelCase =model.generate(input_ids=A_ , max_new_tokens=12 )
__UpperCamelCase =tokenizer(sentences[1] , return_tensors='tf' ).input_ids
__UpperCamelCase =model.generate(input_ids=A_ , max_new_tokens=12 )
__UpperCamelCase =tokenizer.batch_decode(A_ , skip_special_tokens=A_ )
__UpperCamelCase =tokenizer.decode(output_non_padded[0] , skip_special_tokens=A_ )
__UpperCamelCase =tokenizer.decode(output_padded[0] , skip_special_tokens=A_ )
__UpperCamelCase =[
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , [non_padded_sentence, padded_sentence] )
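# The batched-generation test above depends on LEFT padding: a decoder-only
# model continues from the last token, so padding must sit on the left to keep
# every sequence's final real token in the final position. A NumPy sketch of
# that padding step (pad_token_id=0 is a placeholder value):
import numpy as np

def left_pad(sequences, pad_token_id=0):
    max_len = max(len(seq) for seq in sequences)
    input_ids = np.full((len(sequences), max_len), pad_token_id, dtype=np.int64)
    attention_mask = np.zeros((len(sequences), max_len), dtype=np.int64)
    for row, seq in enumerate(sequences):
        input_ids[row, max_len - len(seq) :] = seq
        attention_mask[row, max_len - len(seq) :] = 1
    return input_ids, attention_mask

ids, mask = left_pad([[2, 268, 9865, 67], [2, 268]])
assert ids.tolist() == [[2, 268, 9865, 67], [0, 0, 2, 268]]
assert mask.tolist() == [[1, 1, 1, 1], [0, 0, 1, 1]]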
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Tuple:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =99
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =37
__UpperCamelCase ='gelu'
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase =None
def _a ( self ) -> Tuple:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> int:
__UpperCamelCase =True
__UpperCamelCase =TFRoFormerForCausalLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =TFRoFormerForMaskedLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForSequenceClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFRoFormerForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForTokenClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerForQuestionAnswering(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.prepare_config_and_inputs()
        (
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
            __UpperCamelCase,
        ) =config_and_inputs
__UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Tuple = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : Tuple = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Optional[Any]:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _a ( self ) -> str:
__UpperCamelCase =TFRoFormerModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Dict:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(A_ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[str]:
__UpperCamelCase =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
__UpperCamelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =model(A_ )[0]
# TODO Replace vocab size
__UpperCamelCase =50000
__UpperCamelCase =[1, 6, vocab_size]
self.assertEqual(output.shape , A_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__UpperCamelCase =tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : int = 1e-4
def _a ( self ) -> int:
__UpperCamelCase =tf.constant([[4, 10]] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__UpperCamelCase =emba(input_ids.shape )
__UpperCamelCase =tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
def _a ( self ) -> int:
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__UpperCamelCase =emba.weight[:3, :5]
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = 1e-4
def _a ( self ) -> List[Any]:
        # query/key tensors of shape (2, 12, 16, 64)
__UpperCamelCase =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__UpperCamelCase =embed_positions([2, 16, 768] )[None, None, :, :]
__UpperCamelCase , __UpperCamelCase =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A_ , A_ , A_ )
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__UpperCamelCase =tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
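# Hedged, self-contained sketch of the rotation verified above. Standard
# rotary position embeddings rotate each consecutive (even, odd) feature pair
# by a position-dependent angle; the exact pairing/interleaving used inside
# TFRoFormerSelfAttention is an assumption of this sketch, not a statement
# about the library implementation.
def _rotary_position_sketch(x, theta):
    import numpy as np
    x_even, x_odd = x[..., 0::2], x[..., 1::2]
    out = np.empty_like(x)
    out[..., 0::2] = x_even * np.cos(theta) - x_odd * np.sin(theta)  # first coordinate of each rotated pair
    out[..., 1::2] = x_even * np.sin(theta) + x_odd * np.cos(theta)  # second coordinate of each rotated pair
    return out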
| 682 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_A = logging.get_logger(__name__)
_A = OrderedDict(
[
('align', 'EfficientNetImageProcessor'),
('beit', 'BeitImageProcessor'),
('bit', 'BitImageProcessor'),
('blip', 'BlipImageProcessor'),
('blip-2', 'BlipImageProcessor'),
('bridgetower', 'BridgeTowerImageProcessor'),
('chinese_clip', 'ChineseCLIPImageProcessor'),
('clip', 'CLIPImageProcessor'),
('clipseg', 'ViTImageProcessor'),
('conditional_detr', 'ConditionalDetrImageProcessor'),
('convnext', 'ConvNextImageProcessor'),
('convnextv2', 'ConvNextImageProcessor'),
('cvt', 'ConvNextImageProcessor'),
('data2vec-vision', 'BeitImageProcessor'),
('deformable_detr', 'DeformableDetrImageProcessor'),
('deit', 'DeiTImageProcessor'),
('deta', 'DetaImageProcessor'),
('detr', 'DetrImageProcessor'),
('dinat', 'ViTImageProcessor'),
('donut-swin', 'DonutImageProcessor'),
('dpt', 'DPTImageProcessor'),
('efficientformer', 'EfficientFormerImageProcessor'),
('efficientnet', 'EfficientNetImageProcessor'),
('flava', 'FlavaImageProcessor'),
('focalnet', 'BitImageProcessor'),
('git', 'CLIPImageProcessor'),
('glpn', 'GLPNImageProcessor'),
('groupvit', 'CLIPImageProcessor'),
('imagegpt', 'ImageGPTImageProcessor'),
('instructblip', 'BlipImageProcessor'),
('layoutlmv2', 'LayoutLMv2ImageProcessor'),
('layoutlmv3', 'LayoutLMv3ImageProcessor'),
('levit', 'LevitImageProcessor'),
('mask2former', 'Mask2FormerImageProcessor'),
('maskformer', 'MaskFormerImageProcessor'),
('mgp-str', 'ViTImageProcessor'),
('mobilenet_v1', 'MobileNetV1ImageProcessor'),
('mobilenet_v2', 'MobileNetV2ImageProcessor'),
        ('mobilevit', 'MobileViTImageProcessor'),
('mobilevitv2', 'MobileViTImageProcessor'),
('nat', 'ViTImageProcessor'),
('oneformer', 'OneFormerImageProcessor'),
('owlvit', 'OwlViTImageProcessor'),
('perceiver', 'PerceiverImageProcessor'),
('pix2struct', 'Pix2StructImageProcessor'),
('poolformer', 'PoolFormerImageProcessor'),
('regnet', 'ConvNextImageProcessor'),
('resnet', 'ConvNextImageProcessor'),
('sam', 'SamImageProcessor'),
('segformer', 'SegformerImageProcessor'),
('swiftformer', 'ViTImageProcessor'),
('swin', 'ViTImageProcessor'),
('swin2sr', 'Swin2SRImageProcessor'),
('swinv2', 'ViTImageProcessor'),
('table-transformer', 'DetrImageProcessor'),
('timesformer', 'VideoMAEImageProcessor'),
('tvlt', 'TvltImageProcessor'),
('upernet', 'SegformerImageProcessor'),
('van', 'ConvNextImageProcessor'),
('videomae', 'VideoMAEImageProcessor'),
('vilt', 'ViltImageProcessor'),
('vit', 'ViTImageProcessor'),
('vit_hybrid', 'ViTHybridImageProcessor'),
('vit_mae', 'ViTImageProcessor'),
('vit_msn', 'ViTImageProcessor'),
('xclip', 'CLIPImageProcessor'),
('yolos', 'YolosImageProcessor'),
]
)
_A = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
__UpperCamelCase =model_type_to_module_name(__lowercase )
__UpperCamelCase =importlib.import_module(F'.{module_name}' , 'transformers.models' )
try:
return getattr(__lowercase , __lowercase )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(__lowercase , '__name__' , __lowercase ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
__UpperCamelCase =importlib.import_module('transformers' )
if hasattr(__lowercase , __lowercase ):
return getattr(__lowercase , __lowercase )
return None
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE__ : Optional[Union[str, os.PathLike]] = None , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : bool = False , SCREAMING_SNAKE_CASE__ : Optional[Dict[str, str]] = None , SCREAMING_SNAKE_CASE__ : Optional[Union[bool, str]] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = None , SCREAMING_SNAKE_CASE__ : bool = False , **SCREAMING_SNAKE_CASE__ : Dict , ):
__UpperCamelCase =get_file_from_repo(
__lowercase , __lowercase , cache_dir=__lowercase , force_download=__lowercase , resume_download=__lowercase , proxies=__lowercase , use_auth_token=__lowercase , revision=__lowercase , local_files_only=__lowercase , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(__lowercase , encoding='utf-8' ) as reader:
return json.load(__lowercase )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> Tuple:
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(A_ )
def _a ( cls , A_ , **A_ ) -> Optional[Any]:
__UpperCamelCase =kwargs.pop('config' , A_ )
__UpperCamelCase =kwargs.pop('trust_remote_code' , A_ )
__UpperCamelCase =True
__UpperCamelCase =ImageProcessingMixin.get_image_processor_dict(A_ , **A_ )
__UpperCamelCase =config_dict.get('image_processor_type' , A_ )
__UpperCamelCase =None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
__UpperCamelCase =config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
__UpperCamelCase =config_dict.pop('feature_extractor_type' , A_ )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
__UpperCamelCase =feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
__UpperCamelCase =config_dict['auto_map']['AutoFeatureExtractor']
__UpperCamelCase =feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(A_ , A_ ):
__UpperCamelCase =AutoConfig.from_pretrained(A_ , **A_ )
# It could be in `config.image_processor_type``
__UpperCamelCase =getattr(A_ , 'image_processor_type' , A_ )
if hasattr(A_ , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
__UpperCamelCase =config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
__UpperCamelCase =image_processor_class_from_name(A_ )
__UpperCamelCase =image_processor_auto_map is not None
__UpperCamelCase =image_processor_class is not None or type(A_ ) in IMAGE_PROCESSOR_MAPPING
__UpperCamelCase =resolve_trust_remote_code(
A_ , A_ , A_ , A_ )
if has_remote_code and trust_remote_code:
__UpperCamelCase =get_class_from_dynamic_module(
A_ , A_ , **A_ )
__UpperCamelCase =kwargs.pop('code_revision' , A_ )
if os.path.isdir(A_ ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(A_ , **A_ )
elif image_processor_class is not None:
return image_processor_class.from_dict(A_ , **A_ )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(A_ ) in IMAGE_PROCESSOR_MAPPING:
__UpperCamelCase =IMAGE_PROCESSOR_MAPPING[type(A_ )]
return image_processor_class.from_dict(A_ , **A_ )
raise ValueError(
f'Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '
f'`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '
f'`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}' )
@staticmethod
def _a ( A_ , A_ ) -> List[str]:
IMAGE_PROCESSOR_MAPPING.register(A_ , A_ )
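# Hedged usage sketch of the resolution logic above (the checkpoint name is
# illustrative; `AutoImageProcessor` is the intended public name of this class):
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")  # -> ConvNextImageProcessor per the map
#   inputs = processor(images=image, return_tensors="pt")                  # dict with `pixel_values`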
| 704 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ = None ) -> None:
if components is None:
__UpperCamelCase =[]
__UpperCamelCase =list(A_ )
def __len__( self ) -> int:
return len(self.__components )
def __str__( self ) -> str:
return "(" + ",".join(map(A_ , self.__components ) ) + ")"
def __add__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] + other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else:
raise Exception('must have the same size' )
def __sub__( self , A_ ) -> Vector:
__UpperCamelCase =len(self )
if size == len(A_ ):
__UpperCamelCase =[self.__components[i] - other.component(A_ ) for i in range(A_ )]
return Vector(A_ )
else: # error case
raise Exception('must have the same size' )
@overload
def __mul__( self , A_ ) -> Vector:
...
@overload
def __mul__( self , A_ ) -> float:
...
def __mul__( self , A_ ) -> float | Vector:
if isinstance(A_ , (float, int) ):
__UpperCamelCase =[c * other for c in self.__components]
return Vector(A_ )
elif isinstance(A_ , A_ ) and len(self ) == len(A_ ):
__UpperCamelCase =len(self )
__UpperCamelCase =[self.__components[i] * other.component(A_ ) for i in range(A_ )]
return sum(A_ )
else: # error case
raise Exception('invalid operand!' )
def _a ( self ) -> Vector:
return Vector(self.__components )
def _a ( self , A_ ) -> float:
if isinstance(A_ , A_ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception('index out of range' )
def _a ( self , A_ , A_ ) -> None:
assert -len(self.__components ) <= pos < len(self.__components )
__UpperCamelCase =value
def _a ( self ) -> float:
if len(self.__components ) == 0:
raise Exception('Vector is empty' )
__UpperCamelCase =[c**2 for c in self.__components]
return math.sqrt(sum(A_ ) )
def _a ( self , A_ , A_ = False ) -> float:
__UpperCamelCase =self * other
__UpperCamelCase =self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return Vector([0] * dimension )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and (isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ))
__UpperCamelCase =[0] * dimension
__UpperCamelCase =1
return Vector(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : Vector , SCREAMING_SNAKE_CASE__ : Vector ):
assert (
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
and (isinstance(SCREAMING_SNAKE_CASE__ , (int, float) ))
)
return x * scalar + y
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )]
return Vector(SCREAMING_SNAKE_CASE__ )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ ) -> None:
__UpperCamelCase =matrix
__UpperCamelCase =w
__UpperCamelCase =h
def __str__( self ) -> str:
__UpperCamelCase =''
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] + other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
            raise Exception('matrices must have the same dimension!' )
def __sub__( self , A_ ) -> Matrix:
if self.__width == other.width() and self.__height == other.height():
__UpperCamelCase =[]
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] - other.component(A_ , A_ )
for j in range(self.__width )
]
matrix.append(A_ )
return Matrix(A_ , self.__width , self.__height )
else:
raise Exception('matrices must have the same dimension!' )
@overload
def __mul__( self , A_ ) -> Matrix:
...
@overload
def __mul__( self , A_ ) -> Vector:
...
def __mul__( self , A_ ) -> Vector | Matrix:
if isinstance(A_ , A_ ): # matrix-vector
if len(A_ ) == self.__width:
__UpperCamelCase =zero_vector(self.__height )
for i in range(self.__height ):
__UpperCamelCase =[
self.__matrix[i][j] * other.component(A_ )
for j in range(self.__width )
]
ans.change_component(A_ , sum(A_ ) )
return ans
else:
raise Exception(
'vector must have the same size as the '
'number of columns of the matrix!' )
elif isinstance(A_ , (int, float) ): # matrix-scalar
__UpperCamelCase =[
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(A_ , self.__width , self.__height )
return None
def _a ( self ) -> int:
return self.__height
def _a ( self ) -> int:
return self.__width
def _a ( self , A_ , A_ ) -> float:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
            raise Exception('component: indices out of bounds' )
def _a ( self , A_ , A_ , A_ ) -> None:
if 0 <= x < self.__height and 0 <= y < self.__width:
__UpperCamelCase =value
else:
raise Exception('change_component: indices out of bounds' )
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
__UpperCamelCase =self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(A_ ) ):
__UpperCamelCase =minor[i][:y] + minor[i][y + 1 :]
return Matrix(A_ , self.__width - 1 , self.__height - 1 ).determinant()
def _a ( self , A_ , A_ ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(A_ , A_ )
else:
raise Exception('Indices out of bounds' )
def _a ( self ) -> float:
if self.__height != self.__width:
raise Exception('Matrix is not square' )
if self.__height < 1:
raise Exception('Matrix has no element' )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__UpperCamelCase =[
self.__matrix[0][y] * self.cofactor(0 , A_ ) for y in range(self.__width )
]
return sum(A_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[[0] * n for _ in range(SCREAMING_SNAKE_CASE__ )]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
random.seed(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[
[random.randint(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for _ in range(SCREAMING_SNAKE_CASE__ )] for _ in range(SCREAMING_SNAKE_CASE__ )
]
return Matrix(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
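# Hedged usage sketch: the two classes above share one obfuscated name, so the
# names Vector/Matrix (and the method names) are assumptions recovered from the
# type annotations and exception messages:
#   v, w = Vector([1, 2, 3]), Vector([4, 5, 6])
#   v * w                        # -> 32, the dot product (4 + 10 + 18)
#   (v + w).euclidean_length()   # -> sqrt(5**2 + 7**2 + 9**2) = sqrt(155)
#   m = Matrix([[1, 2], [3, 4]], 2, 2)
#   m.determinant()              # -> 1*4 - 2*3 = -2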
| 682 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
}
_A = {
'vocab_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'
),
},
'spm_file': {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'
)
},
}
_A = {
'facebook/s2t-small-librispeech-asr': 1024,
}
_A = ['pt', 'fr', 'ru', 'nl', 'ro', 'it', 'es', 'de']
_A = {'mustc': MUSTC_LANGS}
class UpperCAmelCase__ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = VOCAB_FILES_NAMES
UpperCAmelCase__ : List[str] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : List[str] = MAX_MODEL_INPUT_SIZES
UpperCAmelCase__ : List[Any] = ["input_ids", "attention_mask"]
UpperCAmelCase__ : Any = []
def __init__( self , A_ , A_ , A_="<s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_=False , A_=False , A_=None , A_=None , A_ = None , **A_ , ) -> List[Any]:
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_A , eos_token=_A , unk_token=_A , pad_token=_A , do_upper_case=_A , do_lower_case=_A , tgt_lang=_A , lang_codes=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , )
__UpperCamelCase =do_upper_case
__UpperCamelCase =do_lower_case
__UpperCamelCase =load_json(_A )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =spm_file
__UpperCamelCase =load_spm(_A , self.sp_model_kwargs )
if lang_codes is not None:
__UpperCamelCase =lang_codes
__UpperCamelCase =LANGUAGES[lang_codes]
__UpperCamelCase =[f'<lang:{lang}>' for lang in self.langs]
__UpperCamelCase ={lang: self.sp_model.PieceToId(f'<lang:{lang}>' ) for lang in self.langs}
__UpperCamelCase =self.lang_tokens
__UpperCamelCase =tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__UpperCamelCase ={}
@property
def _a ( self ) -> Tuple:
return len(self.encoder )
@property
def _a ( self ) -> Dict:
return self._tgt_lang
@tgt_lang.setter
def _a ( self , A_ ) -> List[str]:
__UpperCamelCase =new_tgt_lang
self.set_tgt_lang_special_tokens(_A )
def _a ( self , A_ ) -> List[str]:
__UpperCamelCase =self.lang_code_to_id[tgt_lang]
__UpperCamelCase =[lang_code_id]
def _a ( self , A_ ) -> str:
return self.sp_model.encode(_A , out_type=_A )
def _a ( self , A_ ) -> Any:
return self.encoder.get(_A , self.encoder[self.unk_token] )
def _a ( self , A_ ) -> int:
return self.decoder.get(_A , self.unk_token )
def _a ( self , A_ ) -> Optional[Any]:
__UpperCamelCase =[]
__UpperCamelCase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__UpperCamelCase =self.sp_model.decode(_A )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__UpperCamelCase =[]
else:
current_sub_tokens.append(_A )
__UpperCamelCase =self.sp_model.decode(_A )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def _a ( self , A_ , A_=None ) -> str:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def _a ( self , A_ , A_ = None , A_ = False ) -> Tuple:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_A )) + suffix_ones
return prefix_ones + ([0] * len(_A )) + ([0] * len(_A )) + suffix_ones
def _a ( self ) -> str:
__UpperCamelCase =self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Optional[Any]:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> Dict:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self , A_ , A_ = None ) -> Any:
__UpperCamelCase =Path(_A )
assert save_dir.is_dir(), f'{save_directory} should be a directory'
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _A )
if os.path.abspath(self.spm_file ) != os.path.abspath(_A ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _A )
elif not os.path.isfile(self.spm_file ):
with open(_A , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(_A )
return (str(_A ), str(_A ))
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =sentencepiece.SentencePieceProcessor(**__snake_case )
spm.Load(str(__snake_case ) )
return spm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
with open(__snake_case , 'r' ) as f:
return json.load(__snake_case )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple ):
with open(__snake_case , 'w' ) as f:
json.dump(__snake_case , __snake_case , indent=2 )
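# Hedged usage sketch (this file is the Speech2Text tokenizer; the checkpoint
# name comes from the pretrained map near the top of the file):
#   from transformers import Speech2TextTokenizer
#   tok = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
#   ids = tok("hello world").input_ids   # sentencepiece pieces mapped through vocab.json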
| 705 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_A = logging.get_logger(__name__)
_A = '▁'
_A = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
_A = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
_A = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
_A = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = VOCAB_FILES_NAMES
UpperCAmelCase__ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ : Any = ["input_ids", "attention_mask"]
UpperCAmelCase__ : List[int] = []
UpperCAmelCase__ : List[int] = []
def __init__( self , A_ , A_ , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="m2m100" , A_ = None , A_=8 , **A_ , ) -> None:
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase =language_codes
__UpperCamelCase =FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCamelCase ={lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__UpperCamelCase =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A_ )
for lang_code in fairseq_language_code
if self.get_lang_token(A_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A_ , tgt_lang=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , language_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A_ , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =load_json(A_ )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =spm_file
__UpperCamelCase =load_spm(A_ , self.sp_model_kwargs )
__UpperCamelCase =len(self.encoder )
__UpperCamelCase ={
self.get_lang_token(A_ ): self.encoder_size + i for i, lang_code in enumerate(A_ )
}
__UpperCamelCase ={lang_code: self.encoder_size + i for i, lang_code in enumerate(A_ )}
__UpperCamelCase ={v: k for k, v in self.lang_token_to_id.items()}
__UpperCamelCase =src_lang if src_lang is not None else 'en'
__UpperCamelCase =tgt_lang
__UpperCamelCase =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCamelCase =num_madeup_words
@property
def _a ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , A_ ) -> None:
__UpperCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=A_ )
def _a ( self , A_ ) -> Optional[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def _a ( self , A_ ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A_ , self.unk_token )
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =[]
__UpperCamelCase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
__UpperCamelCase =[]
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self ) -> Dict:
__UpperCamelCase ={self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> None:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
__UpperCamelCase =Path(A_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , A_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A_ )
elif not os.path.isfile(self.spm_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
return (str(A_ ), str(A_ ))
def _a ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A_ , A_ , **A_ )
def _a ( self , A_ , A_ , A_ , **A_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
__UpperCamelCase =self(A_ , add_special_tokens=A_ , **A_ )
__UpperCamelCase =self.get_lang_id(A_ )
__UpperCamelCase =tgt_lang_id
return inputs
def _a ( self ) -> List[Any]:
self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> str:
return self.lang_code_to_token[lang]
def _a ( self , A_ ) -> int:
__UpperCamelCase =self.get_lang_token(A_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict[str, Any] ):
__UpperCamelCase =sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE__ )
spm.Load(str(SCREAMING_SNAKE_CASE__ ) )
return spm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , indent=2 )
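# Hedged usage sketch of the src/tgt language handling above (checkpoint name
# from the pretrained map earlier in this file; `M2M100Tokenizer` is the
# intended public name of this class):
#   from transformers import M2M100Tokenizer
#   tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="fr")
#   batch = tok("Hello world", return_tensors="pt")  # input_ids are prefixed with the __en__ token
#   forced_bos = tok.get_lang_id("fr")               # pass as forced_bos_token_id when generating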
| 682 | 0 |
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict ):
# Load checkpoint
__UpperCamelCase =torch.load(lowerCAmelCase__ , map_location='cpu' )
__UpperCamelCase =chkpt['model']
# We have the base model one level deeper than the original XLM repository
__UpperCamelCase ={}
for k, v in state_dict.items():
if "pred_layer" in k:
__UpperCamelCase =v
else:
__UpperCamelCase =v
__UpperCamelCase =chkpt['params']
__UpperCamelCase ={n: v for n, v in config.items() if not isinstance(lowerCAmelCase__ , (torch.FloatTensor, numpy.ndarray) )}
__UpperCamelCase =chkpt['dico_word2id']
__UpperCamelCase ={s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
# Save pytorch-model
__UpperCamelCase =pytorch_dump_folder_path + '/' + WEIGHTS_NAME
__UpperCamelCase =pytorch_dump_folder_path + '/' + CONFIG_NAME
__UpperCamelCase =pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(F'Save PyTorch model to {pytorch_weights_dump_path}' )
torch.save(lowerCAmelCase__ , lowerCAmelCase__ )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase__ , indent=2 ) + '\n' )
    print(F'Save vocab file to {pytorch_vocab_dump_path}' )
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(lowerCAmelCase__ , indent=2 ) + '\n' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--xlm_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_A = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
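# Example invocation (script name and paths are placeholders for this sketch):
#   python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#       --xlm_checkpoint_path ./mlm_en_2048.pth \
#       --pytorch_dump_folder_path ./xlm-dump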
| 706 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =original_name.split('.' )[0]
__UpperCamelCase =key.split('.' )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 2] )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 1] )
__UpperCamelCase =orig_block_num - offset
__UpperCamelCase =key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =OrderedDict()
__UpperCamelCase , __UpperCamelCase =0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
__UpperCamelCase =key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
__UpperCamelCase =key[: key.find('proj' )]
__UpperCamelCase =key.replace(SCREAMING_SNAKE_CASE__ , F'patch_embeddings.{total_embed_found}.' )
__UpperCamelCase =key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
__UpperCamelCase ='poolformer.encoder.' + key
if "mlp.fc1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm1' , 'before_norm' )
if "norm2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
__UpperCamelCase =key.replace('head' , 'classifier' )
__UpperCamelCase =value
return new_state_dict
def _UpperCAmelCase ( ):
__UpperCamelCase ='http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase =Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return image
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =PoolFormerConfig()
# set attributes based on model_name
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =model_name[-3:]
__UpperCamelCase =10_00
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =(1, 10_00)
# set config attributes
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
if size == "s12":
__UpperCamelCase =[2, 2, 6, 2]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s24":
__UpperCamelCase =[4, 4, 12, 4]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.9
elif size == "m36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
elif size == "m48":
__UpperCamelCase =[8, 8, 24, 8]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
# Prepare image
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device('cpu' ) )
# rename keys
__UpperCamelCase =rename_keys(SCREAMING_SNAKE_CASE__ )
# create HuggingFace model and load state dict
__UpperCamelCase =PoolFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# Define image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =outputs.logits
# define expected logit slices for different models
if size == "s12":
__UpperCamelCase =torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
__UpperCamelCase =torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
__UpperCamelCase =torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
__UpperCamelCase =torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
__UpperCamelCase =torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_A = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
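# Example invocation (script name and paths are placeholders for this sketch):
#   python convert_poolformer_original_to_pytorch.py \
#       --model_name poolformer_s12 \
#       --checkpoint_path ./poolformer_s12.pth.tar \
#       --pytorch_dump_folder_path ./poolformer-s12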
| 682 | 0 |
import numpy as np
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
return 1 / (1 + np.exp(-vector ))
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
return vector * sigmoid(lowerCAmelCase_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
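    # Hedged numeric check of the two activations above, re-derived locally
    # because the module-level names are ambiguous after renaming:
    _v = np.array([-1.0, 0.0, 1.0])
    _sig = 1 / (1 + np.exp(-_v))                    # sigmoid
    assert np.isclose(_sig[1], 0.5)                 # sigmoid(0) == 0.5
    assert np.allclose(_v * _sig, [-0.26894142, 0.0, 0.73105858])  # SiLU values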
| 707 |
from math import asin, atan, cos, radians, sin, sqrt, tan
_A = 6_378_137.0
_A = 6_356_752.314_245
_A = 637_8137
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
__UpperCamelCase =(AXIS_A - AXIS_B) / AXIS_A
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
    # Haversine formula on the reduced (flattening-corrected) latitudes
__UpperCamelCase =sin((phi_a - phi_a) / 2 )
__UpperCamelCase =sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__UpperCamelCase =sqrt(sin_sq_phi + (cos(SCREAMING_SNAKE_CASE__ ) * cos(SCREAMING_SNAKE_CASE__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
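    # Hedged worked example: for two points on the equator the flattening
    # correction vanishes, so 90 degrees of longitude is exactly a quarter of
    # the equatorial circumference (constants restated because the module-level
    # names above are ambiguous after renaming):
    _r = 6_378_137.0
    _quarter = 2 * _r * asin(sin(radians(45)))      # haversine with d_lat = 0, d_lon = 90
    assert abs(_quarter - _r * 3.14159265358979 / 2) < 1.0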
| 682 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase =0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase =0
while b > 0:
if b & 1:
__UpperCamelCase =((res % c) + (a % c)) % c
a += a
b >>= 1
return res
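# Hedged usage sketch (the two helpers above share one obfuscated name, so the
# intended names binary_multiply / binary_mod_multiply are assumed):
#   binary_multiply(3, 4)        # -> 12  (4 = 0b100: a is doubled twice, added once)
#   binary_mod_multiply(3, 4, 5) # -> 2   (12 mod 5, reduced at every step)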
| 708 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
return 1 if input_a == input_a else 0
def _UpperCAmelCase ( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 682 | 0 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
_A = {name: getattr(transformers, name + 'Fast') for name in SLOW_TO_FAST_CONVERTERS}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
if tokenizer_name is None:
__UpperCamelCase =TOKENIZER_CLASSES
else:
__UpperCamelCase ={tokenizer_name: getattr(_A , tokenizer_name + 'Fast' )}
logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
for tokenizer_name in tokenizer_names:
__UpperCamelCase =TOKENIZER_CLASSES[tokenizer_name]
__UpperCamelCase =True
if checkpoint_name is None:
__UpperCamelCase =list(tokenizer_class.max_model_input_sizes.keys() )
else:
__UpperCamelCase =[checkpoint_name]
logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
for checkpoint in checkpoint_names:
logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
# Load tokenizer
__UpperCamelCase =tokenizer_class.from_pretrained(_A , force_download=_A )
# Save fast tokenizer
logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
# For organization names we create sub-directories
if "/" in checkpoint:
__UpperCamelCase , __UpperCamelCase =checkpoint.split('/' )
__UpperCamelCase =os.path.join(_A , _A )
elif add_prefix:
__UpperCamelCase =checkpoint
__UpperCamelCase =dump_path
else:
__UpperCamelCase =None
__UpperCamelCase =dump_path
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
__UpperCamelCase =list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
__UpperCamelCase =file_path.split(_A )[-1][0]
if next_char == "/":
__UpperCamelCase =os.path.join(_A , _A )
__UpperCamelCase =None
logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
__UpperCamelCase =tokenizer.save_pretrained(
_A , legacy_format=_A , filename_prefix=_A )
logger.info(F'=> File names {file_names}' )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(_A )
logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output generated fast tokenizer files.'
)
parser.add_argument(
'--tokenizer_name',
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
'download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--checkpoint_name',
default=None,
type=str,
help='Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.',
)
parser.add_argument(
'--force_download',
action='store_true',
help='Re-download checkpoints.',
)
_A = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 709 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 ):
__UpperCamelCase =right or len(SCREAMING_SNAKE_CASE__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
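    # Hedged trace (assumes the intended name is `search`): the routine checks
    # both ends of the current window, then recurses inward, so every index is
    # visited once:
    #   search([1, 2, 3, 4, 5], 4)
    #     -> index 0 (1) and index 4 (5) miss; recurse with window (1, 3)
    #     -> index 3 holds 4, so the call returns 3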
| 682 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =''
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase =[chr(i + 65 ) for i in range(26 )]
# Remove duplicate characters from key
__UpperCamelCase =remove_duplicates(key.upper() )
__UpperCamelCase =len(A_ )
# First fill cipher with key characters
__UpperCamelCase ={alphabet[i]: char for i, char in enumerate(A_ )}
# Then map remaining characters in alphabet to
# the alphabet from the beginning
for i in range(len(A_ ) , 26 ):
__UpperCamelCase =alphabet[i - offset]
# Ensure we are not mapping letters to letters previously mapped
while char in key:
offset -= 1
__UpperCamelCase =alphabet[i - offset]
__UpperCamelCase =char
return cipher_alphabet
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
return "".join(cipher_map.get(A_ , A_ ) for ch in message.upper() )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase ={v: k for k, v in cipher_map.items()}
return "".join(rev_cipher_map.get(A_ , A_ ) for ch in message.upper() )
def _UpperCAmelCase ( ):
__UpperCamelCase =input('Enter message to encode or decode: ' ).strip()
__UpperCamelCase =input('Enter keyword: ' ).strip()
__UpperCamelCase =input('Encipher or decipher? E/D:' ).strip()[0].lower()
try:
__UpperCamelCase ={'e': encipher, 'd': decipher}[option]
except KeyError:
raise KeyError('invalid input option' )
__UpperCamelCase =create_cipher_map(A_ )
print(func(A_ , A_ ) )
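# Hedged round-trip sketch (assumes the helpers keep their intended names
# create_cipher_map / encipher / decipher; the defs above shadow one another
# after renaming, so this is illustrative only):
#   cm = create_cipher_map("HELLO")          # duplicate letters collapse: key -> "HELO"
#   ct = encipher("attack at dawn", cm)
#   decipher(ct, cm) == "ATTACK AT DAWN"     # True: the map is a bijection on A-Z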
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
        # with apply_ocr = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
        # with apply_ocr = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
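# Hedged usage sketch (not part of the test above; it assumes the de-obfuscated
# class is transformers.LayoutLMv3ImageProcessor and that Pillow is installed).
# It mirrors the shape contract the assertions above check:
#
#   from PIL import Image
#   from transformers import LayoutLMv3ImageProcessor
#
#   processor = LayoutLMv3ImageProcessor(apply_ocr=False)
#   batch = processor(Image.new('RGB', (640, 480)), return_tensors='pt')
#   assert batch.pixel_values.shape == (1, 3, 224, 224)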
| 682 | 0 |
from __future__ import annotations
import time
import numpy as np
_A = [8, 5, 9, 7]
_A = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_A = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =claim_vector
__UpperCamelCase =allocated_resources_table
__UpperCamelCase =maximum_claim_table
def _a ( self ) -> int:
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _a ( self ) -> Tuple:
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _a ( self ) -> List[str]:
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(_A ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _a ( self ) -> Any:
return {self.__need().index(_A ): i for i in self.__need()}
def _a ( self , **A_ ) -> Optional[Any]:
__UpperCamelCase =self.__need()
__UpperCamelCase =self.__allocated_resources_table
__UpperCamelCase =self.__available_resources()
__UpperCamelCase =self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__UpperCamelCase =False
for each_need in need_list:
__UpperCamelCase =True
for index, need in enumerate(_A ):
if need > available_resources[index]:
__UpperCamelCase =False
break
if execution:
__UpperCamelCase =True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__UpperCamelCase =original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(_A )
# update available/freed resources stack
__UpperCamelCase =np.array(_A ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(_A ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def _a ( self ) -> Dict:
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(_A ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(_A ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(_A ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(_A ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
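    # Hedged worked sketch of the safety check implemented above (Banker's
    # algorithm), written with plain names since the class's own names are
    # obfuscated. It uses the same example data as the module-level lists and
    # the numpy import at the top of this file.
    claim = np.array([8, 5, 9, 7])
    alloc = np.array([[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]])
    maxc = np.array([[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]])
    need = maxc - alloc
    avail = claim - alloc.sum(axis=0)
    done, order = [False] * len(alloc), []
    progress = True
    while progress:
        progress = False
        for i in range(len(alloc)):
            if not done[i] and bool((need[i] <= avail).all()):
                avail = avail + alloc[i]  # process i finishes and frees its allocation
                done[i], progress = True, True
                order.append(i)
    print('safe execution order:' if all(done) else 'unsafe state:', order)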
| 711 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_A = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Whether to use SortishSampler or not."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=A_ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _a ( self ) -> Dict:
__UpperCamelCase =super().to_dict()
for k, v in d.items():
if isinstance(A_ , A_ ):
__UpperCamelCase =v.to_dict()
return d
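# Hedged usage sketch (assumption: the dataclass above is
# transformers.Seq2SeqTrainingArguments with its field names restored):
#
#   args = Seq2SeqTrainingArguments(
#       output_dir='out',
#       predict_with_generate=True,   # turns on the generation_* fields above
#       generation_max_length=128,
#       generation_num_beams=4,
#   )
#   assert args.to_dict()['generation_num_beams'] == 4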
| 682 | 0 |
from math import pi, sqrt, tan
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[Any] ):
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Any ):
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
__UpperCamelCase =(height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] ):
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(__snake_case , 2 ) * torus_radius * tube_radius
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ):
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int ):
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
__UpperCamelCase =(sidea + sidea + sidea) / 2
__UpperCamelCase =sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int ):
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ):
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
if not isinstance(__snake_case , __snake_case ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \\nequal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \\nlength of a side' )
    return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"""Rectangle: {area_rectangle(10, 20) = }""")
print(f"""Square: {area_square(10) = }""")
print(f"""Triangle: {area_triangle(10, 10) = }""")
print(f"""Triangle: {area_triangle_three_sides(5, 12, 13) = }""")
print(f"""Parallelogram: {area_parallelogram(10, 20) = }""")
print(f"""Rhombus: {area_rhombus(10, 20) = }""")
print(f"""Trapezium: {area_trapezium(10, 20, 30) = }""")
print(f"""Circle: {area_circle(20) = }""")
print(f"""Ellipse: {area_ellipse(10, 20) = }""")
print('\nSurface Areas of various geometric shapes: \n')
print(f"""Cube: {surface_area_cube(20) = }""")
print(f"""Cuboid: {surface_area_cuboid(10, 20, 30) = }""")
print(f"""Sphere: {surface_area_sphere(20) = }""")
print(f"""Hemisphere: {surface_area_hemisphere(20) = }""")
print(f"""Cone: {surface_area_cone(10, 20) = }""")
print(f"""Conical Frustum: {surface_area_conical_frustum(10, 20, 30) = }""")
print(f"""Cylinder: {surface_area_cylinder(10, 20) = }""")
print(f"""Torus: {surface_area_torus(20, 10) = }""")
print(f"""Equilateral Triangle: {area_reg_polygon(3, 10) = }""")
print(f"""Square: {area_reg_polygon(4, 10) = }""")
print(f"""Reqular Pentagon: {area_reg_polygon(5, 10) = }""")
| 712 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
def __init__( self , A_=30524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1E-12 , A_=0.0 , A_=0.0 , A_=0.02 , A_=30522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> Optional[int]:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__UpperCamelCase =BlipTextConfig(**A_ )
__UpperCamelCase =BlipVisionConfig(**A_ )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
def _a ( cls , A_ , A_ , **A_ ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
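# Hedged usage sketch (assumption: the classes above are BlipTextConfig,
# BlipVisionConfig and BlipConfig from transformers, with names obfuscated):
#
#   text_cfg = BlipTextConfig(hidden_size=768)
#   vision_cfg = BlipVisionConfig(image_size=384)
#   cfg = BlipConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   assert cfg.to_dict()['vision_config']['image_size'] == 384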
| 682 | 0 |
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
_A : List[str] = (
'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Tuple ):
warnings.warn(lowerCamelCase_ , lowerCamelCase_ )
requires_backends(lowerCamelCase_ , 'sklearn' )
return (preds == labels).mean()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str ):
warnings.warn(lowerCamelCase_ , lowerCamelCase_ )
requires_backends(lowerCamelCase_ , 'sklearn' )
__UpperCamelCase =simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )
__UpperCamelCase =fa_score(y_true=lowerCamelCase_ , y_pred=lowerCamelCase_ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ):
warnings.warn(lowerCamelCase_ , lowerCamelCase_ )
requires_backends(lowerCamelCase_ , 'sklearn' )
__UpperCamelCase =pearsonr(lowerCamelCase_ , lowerCamelCase_ )[0]
__UpperCamelCase =spearmanr(lowerCamelCase_ , lowerCamelCase_ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
warnings.warn(lowerCamelCase_ , lowerCamelCase_ )
requires_backends(lowerCamelCase_ , 'sklearn' )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ), F'Predictions and labels have mismatched lengths {len(lowerCamelCase_ )} and {len(lowerCamelCase_ )}'
if task_name == "cola":
return {"mcc": matthews_corrcoef(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "mrpc":
return acc_and_fa(lowerCamelCase_ , lowerCamelCase_ )
elif task_name == "sts-b":
return pearson_and_spearman(lowerCamelCase_ , lowerCamelCase_ )
elif task_name == "qqp":
return acc_and_fa(lowerCamelCase_ , lowerCamelCase_ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "rte":
return {"acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "hans":
return {"acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
else:
raise KeyError(lowerCamelCase_ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ):
warnings.warn(lowerCamelCase_ , lowerCamelCase_ )
requires_backends(lowerCamelCase_ , 'sklearn' )
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
raise ValueError(F'Predictions and labels have mismatched lengths {len(lowerCamelCase_ )} and {len(lowerCamelCase_ )}' )
if task_name == "xnli":
return {"acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
else:
raise KeyError(lowerCamelCase_ )
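# Minimal worked example of the simple-accuracy metric above (pure NumPy; the
# sklearn-based metrics follow the same preds/labels calling convention):
import numpy as np

_preds = np.array([1, 0, 1, 1])
_labels = np.array([1, 0, 0, 1])
assert (_preds == _labels).mean() == 0.75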
| 713 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
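# Hedged usage sketch (assumptions: RoCBertTokenizer from transformers and the
# three vocab files written in setUp above, where '你' has id 5 and '好' id 6):
#
#   tok = RoCBertTokenizer(vocab_file, word_shape_file, word_pronunciation_file)
#   tokens = tok.tokenize('你好')
#   ids = tok.convert_tokens_to_ids(tokens)               # -> [5, 6]
#   shape_ids = tok.convert_tokens_to_shape_ids(tokens)   # same ids in this toy vocab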
| 682 | 0 |
from __future__ import annotations
from fractions import Fraction
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
return (
num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =[]
__UpperCamelCase =11
__UpperCamelCase =int('1' + '0' * digit_len )
for num in range(A__ , A__ ):
while den <= 99:
if (num != den) and (num % 10 == den // 10) and (den % 10 != 0):
if is_digit_cancelling(A__ , A__ ):
solutions.append(F'{num}/{den}' )
den += 1
num += 1
__UpperCamelCase =10
return solutions
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int = 2 ):
__UpperCamelCase =1.0
for fraction in fraction_list(A__ ):
__UpperCamelCase =Fraction(A__ )
result *= frac.denominator / frac.numerator
return int(A__ )
if __name__ == "__main__":
print(solution())
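    # Worked example of the 'digit cancelling' property searched for above
    # (Fraction is already imported at the top of this file): 49/98 really
    # equals 4/8 after (incorrectly) cancelling the shared digit 9.
    assert Fraction(49, 98) == Fraction(4, 8) == Fraction(1, 2)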
| 714 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_A = random.Random()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=1.0 , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ):
if rng is None:
__UpperCamelCase =global_rng
__UpperCamelCase =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) -> Optional[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =min_seq_length
__UpperCamelCase =max_seq_length
__UpperCamelCase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase =padding_value
__UpperCamelCase =sampling_rate
__UpperCamelCase =return_attention_mask
__UpperCamelCase =do_normalize
__UpperCamelCase =feature_size
__UpperCamelCase =chunk_length
__UpperCamelCase =hop_length
def _a ( self ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self , A_=False , A_=False ) -> Any:
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase =[np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self ) -> Optional[int]:
__UpperCamelCase =WhisperFeatureExtractionTester(self )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase =self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =os.path.join(A_ , 'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase =self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
        # Tests feature extraction on single and batched inputs (Python lists and numpy arrays)
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCamelCase =feature_extractor(A_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCamelCase =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCamelCase =[floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCamelCase =np.asarray(A_ )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
__UpperCamelCase =[x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def _a ( self ) -> Dict:
import torch
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =np.random.rand(100 , 32 ).astype(np.floataa )
__UpperCamelCase =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__UpperCamelCase =ds.sort('id' ).select(range(A_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase =torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__UpperCamelCase =self._load_datasamples(1 )
__UpperCamelCase =WhisperFeatureExtractor()
__UpperCamelCase =feature_extractor(A_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1E-4 ) )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =self._load_datasamples(1 )[0]
        __UpperCamelCase =((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] (16-bit unsigned range) to make the normalization effect visible
__UpperCamelCase =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1E-3 ) )
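# Hedged usage sketch (assumptions: transformers.WhisperFeatureExtractor and
# 16 kHz mono audio; it mirrors the (1, 80, 3000) shape asserted above):
#
#   import numpy as np
#   from transformers import WhisperFeatureExtractor
#
#   fe = WhisperFeatureExtractor()
#   feats = fe(np.zeros(16000), sampling_rate=16000, return_tensors='np')
#   assert feats.input_features.shape == (1, 80, 3000)  # 80 mel bins, 30 s padded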
| 682 | 0 |
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = AutoencoderKL
UpperCAmelCase__ : Tuple = "sample"
UpperCAmelCase__ : Any = 1e-2
@property
def _a ( self ) -> int:
__UpperCamelCase =4
__UpperCamelCase =3
__UpperCamelCase =(32, 32)
__UpperCamelCase =floats_tensor((batch_size, num_channels) + sizes ).to(A_ )
return {"sample": image}
@property
def _a ( self ) -> Union[str, Any]:
return (3, 32, 32)
@property
def _a ( self ) -> Tuple:
return (3, 32, 32)
def _a ( self ) -> int:
__UpperCamelCase ={
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
__UpperCamelCase =self.dummy_input
return init_dict, inputs_dict
def _a ( self ) -> Optional[int]:
pass
def _a ( self ) -> Optional[Any]:
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS' )
def _a ( self ) -> int:
# enable deterministic behavior for gradient checkpointing
__UpperCamelCase =self.prepare_init_args_and_inputs_for_common()
__UpperCamelCase =self.model_class(**A_ )
model.to(A_ )
assert not model.is_gradient_checkpointing and model.training
__UpperCamelCase =model(**A_ ).sample
        # run the backwards pass on the model. For simplicity we skip a real loss
        # and instead backprop on the mean difference to random targets.
model.zero_grad()
__UpperCamelCase =torch.randn_like(A_ )
__UpperCamelCase =(out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
__UpperCamelCase =self.model_class(**A_ )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(A_ )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
__UpperCamelCase =model_a(**A_ ).sample
        # run the backwards pass on the model. For simplicity we skip a real loss
        # and instead backprop on the mean difference to random targets.
model_a.zero_grad()
__UpperCamelCase =(out_a - labels).mean()
loss_a.backward()
        # compare the losses and the parameter gradients
self.assertTrue((loss - loss_a).abs() < 1E-5 )
__UpperCamelCase =dict(model.named_parameters() )
__UpperCamelCase =dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=A_ )
self.assertIsNotNone(A_ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(A_ )
__UpperCamelCase =model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' )
__UpperCamelCase =model.to(A_ )
model.eval()
if torch_device == "mps":
__UpperCamelCase =torch.manual_seed(0 )
else:
__UpperCamelCase =torch.Generator(device=A_ ).manual_seed(0 )
__UpperCamelCase =torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
__UpperCamelCase =image.to(A_ )
with torch.no_grad():
__UpperCamelCase =model(A_ , sample_posterior=A_ , generator=A_ ).sample
__UpperCamelCase =output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
__UpperCamelCase =torch.tensor(
[
-4.0078E-01,
-3.8323E-04,
-1.2681E-01,
-1.1462E-01,
2.0095E-01,
1.0893E-01,
-8.8247E-02,
-3.0361E-01,
-9.8644E-03,
] )
elif torch_device == "cpu":
__UpperCamelCase =torch.tensor(
[-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] )
else:
__UpperCamelCase =torch.tensor(
[-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] )
self.assertTrue(torch_all_close(A_ , A_ , rtol=1E-2 ) )
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self , A_ , A_ ) -> str:
return f'gaussian_noise_s={seed}_shape={"_".join([str(A_ ) for s in shape] )}.npy'
def _a ( self ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self , A_=0 , A_=(4, 3, 512, 512) , A_=False ) -> Dict:
__UpperCamelCase =torch.floataa if fpaa else torch.floataa
__UpperCamelCase =torch.from_numpy(load_hf_numpy(self.get_file_format(A_ , A_ ) ) ).to(A_ ).to(A_ )
return image
def _a ( self , A_="CompVis/stable-diffusion-v1-4" , A_=False ) -> List[Any]:
__UpperCamelCase ="fp16" if fpaa else None
__UpperCamelCase =torch.floataa if fpaa else torch.floataa
__UpperCamelCase =AutoencoderKL.from_pretrained(
A_ , subfolder='vae' , torch_dtype=A_ , revision=A_ , )
model.to(A_ ).eval()
return model
def _a ( self , A_=0 ) -> Optional[Any]:
if torch_device == "mps":
return torch.manual_seed(A_ )
return torch.Generator(device=A_ ).manual_seed(A_ )
@parameterized.expand(
[
# fmt: off
[33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _a ( self , A_ , A_ , A_ ) -> int:
__UpperCamelCase =self.get_sd_vae_model()
__UpperCamelCase =self.get_sd_image(A_ )
__UpperCamelCase =self.get_generator(A_ )
with torch.no_grad():
__UpperCamelCase =model(A_ , generator=A_ , sample_posterior=A_ ).sample
assert sample.shape == image.shape
__UpperCamelCase =sample[-1, -2:, -2:, :2].flatten().float().cpu()
__UpperCamelCase =torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(A_ , A_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]],
[47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]],
# fmt: on
] )
@require_torch_gpu
def _a ( self , A_ , A_ ) -> str:
__UpperCamelCase =self.get_sd_vae_model(fpaa=A_ )
__UpperCamelCase =self.get_sd_image(A_ , fpaa=A_ )
__UpperCamelCase =self.get_generator(A_ )
with torch.no_grad():
__UpperCamelCase =model(A_ , generator=A_ , sample_posterior=A_ ).sample
assert sample.shape == image.shape
__UpperCamelCase =sample[-1, -2:, :2, -2:].flatten().float().cpu()
__UpperCamelCase =torch.tensor(A_ )
assert torch_all_close(A_ , A_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]],
[47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]],
# fmt: on
] )
def _a ( self , A_ , A_ , A_ ) -> List[Any]:
__UpperCamelCase =self.get_sd_vae_model()
__UpperCamelCase =self.get_sd_image(A_ )
with torch.no_grad():
__UpperCamelCase =model(A_ ).sample
assert sample.shape == image.shape
__UpperCamelCase =sample[-1, -2:, -2:, :2].flatten().float().cpu()
__UpperCamelCase =torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice )
assert torch_all_close(A_ , A_ , atol=3E-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]],
[37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]],
# fmt: on
] )
@require_torch_gpu
def _a ( self , A_ , A_ ) -> List[Any]:
__UpperCamelCase =self.get_sd_vae_model()
__UpperCamelCase =self.get_sd_image(A_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__UpperCamelCase =model.decode(A_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__UpperCamelCase =sample[-1, -2:, :2, -2:].flatten().cpu()
__UpperCamelCase =torch.tensor(A_ )
assert torch_all_close(A_ , A_ , atol=1E-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]],
[16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]],
# fmt: on
] )
@require_torch_gpu
def _a ( self , A_ , A_ ) -> int:
__UpperCamelCase =self.get_sd_vae_model(fpaa=A_ )
__UpperCamelCase =self.get_sd_image(A_ , shape=(3, 4, 64, 64) , fpaa=A_ )
with torch.no_grad():
__UpperCamelCase =model.decode(A_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
__UpperCamelCase =sample[-1, -2:, :2, -2:].flatten().float().cpu()
__UpperCamelCase =torch.tensor(A_ )
assert torch_all_close(A_ , A_ , atol=5E-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def _a ( self , A_ ) -> str:
__UpperCamelCase =self.get_sd_vae_model(fpaa=A_ )
__UpperCamelCase =self.get_sd_image(A_ , shape=(3, 4, 64, 64) , fpaa=A_ )
with torch.no_grad():
__UpperCamelCase =model.decode(A_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__UpperCamelCase =model.decode(A_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(A_ , A_ , atol=1E-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.' )
def _a ( self , A_ ) -> Any:
__UpperCamelCase =self.get_sd_vae_model()
__UpperCamelCase =self.get_sd_image(A_ , shape=(3, 4, 64, 64) )
with torch.no_grad():
__UpperCamelCase =model.decode(A_ ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
__UpperCamelCase =model.decode(A_ ).sample
assert list(sample.shape ) == [3, 3, 512, 512]
assert torch_all_close(A_ , A_ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]],
[47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]],
# fmt: on
] )
def _a ( self , A_ , A_ ) -> List[Any]:
__UpperCamelCase =self.get_sd_vae_model()
__UpperCamelCase =self.get_sd_image(A_ )
__UpperCamelCase =self.get_generator(A_ )
with torch.no_grad():
__UpperCamelCase =model.encode(A_ ).latent_dist
__UpperCamelCase =dist.sample(generator=A_ )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
__UpperCamelCase =sample[0, -1, -3:, -3:].flatten().cpu()
__UpperCamelCase =torch.tensor(A_ )
__UpperCamelCase =3E-3 if torch_device != "mps" else 1E-2
assert torch_all_close(A_ , A_ , atol=A_ )
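# Hedged usage sketch (assumptions: diffusers.AutoencoderKL and the SD v1-4 VAE
# used above; a 512x512 RGB input maps to a 4 x 64 x 64 latent, matching the
# encode and decode shape checks in the tests):
#
#   import torch
#   from diffusers import AutoencoderKL
#
#   vae = AutoencoderKL.from_pretrained('CompVis/stable-diffusion-v1-4', subfolder='vae')
#   posterior = vae.encode(torch.randn(1, 3, 512, 512)).latent_dist
#   assert posterior.sample().shape == (1, 4, 64, 64)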
| 715 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.float32 )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.float32 )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ) -> bool:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self ) -> Dict:
__UpperCamelCase =TFFlaubertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase =tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.int32 , )  # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
        __UpperCamelCase =tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 682 | 0 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    """Decode an audio byte payload to a mono float32 waveform via ffmpeg."""
    ar = f'{sampling_rate}'
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
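# Minimal usage sketch (assumes a local 'sample.mp3' and ffmpeg on PATH -- both
# assumptions for illustration, not part of this module):
#
#     with open('sample.mp3', 'rb') as f:
#         audio = ffmpeg_read(f.read(), sampling_rate=16_000)
#     # `audio` is a 1-D float32 numpy array, resampled to 16 kHz mono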
def ffmpeg_microphone(sampling_rate: int, chunk_length_s: float, format_for_conversion: str = 'f32le'):
    """Stream raw microphone bytes through ffmpeg in chunks of `chunk_length_s` seconds."""
    ar = f'{sampling_rate}'
    ac = '1'
    if format_for_conversion == 's16le':
        size_of_sample = 2
    elif format_for_conversion == 'f32le':
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`')
    system = platform.system()
    if system == 'Linux':
        format_ = 'alsa'
        input_ = 'default'
    elif system == 'Darwin':
        format_ = 'avfoundation'
        input_ = ':0'
    elif system == 'Windows':
        format_ = 'dshow'
        input_ = 'default'
    ffmpeg_command = [
        'ffmpeg',
        '-f',
        format_,
        '-i',
        input_,
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-fflags',
        'nobuffer',
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = 'f32le',
):
    """Like `ffmpeg_microphone`, but yields overlapping (strided) numpy chunks suitable for streaming inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == 's16le':
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == 'f32le':
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f'Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`')
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'], dtype=dtype)
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
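# Hedged usage sketch (assumes a working default microphone and ffmpeg on PATH;
# `handle_chunk` is a hypothetical callback, not defined in this module):
#
#     for chunk in ffmpeg_microphone_live(sampling_rate=16_000, chunk_length_s=5.0, stream_chunk_s=1.0):
#         handle_chunk(chunk['raw'], chunk['stride'])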
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Re-chunk a byte iterator into fixed-size chunks with (left, right) bytes of overlap."""
    acc = b''
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f'Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}')
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {'raw': acc[:chunk_len], 'stride': stride, 'partial': True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
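# Worked example of the striding above (hand-traced, stream=False): with
# chunk_len=4 and stride=(1, 1), consecutive chunks overlap by 2 bytes:
#
#     >>> list(chunk_bytes_iter(iter([b'abcdefgh']), 4, stride=(1, 1)))
#     [{'raw': b'abcd', 'stride': (0, 1)}, {'raw': b'cdef', 'stride': (1, 1)},
#      {'raw': b'efgh', 'stride': (1, 1)}, {'raw': b'gh', 'stride': (1, 0)}]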
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal helper that yields raw byte buffers read from an ffmpeg subprocess."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename') from error
| 716 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename='mytext.txt', testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 682 | 0 |
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = 'sshleifer/bart-tiny-random'
TINY_T5 = 'patrickvonplaten/t5-tiny-random'
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)
    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)
    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)
    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)
    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)
    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
| 717 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, seeded at x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2
def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
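# Hedged usage sketch: any continuous function with a root between the seeds
# works, e.g. approximating sqrt(2) as a root of x**2 - 2:
#
#     intersection(lambda x: x**2 - 2, 1.0, 2.0)  # ~1.41421; stops once
#                                                 # successive iterates differ by < 1e-5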
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
| 682 | 0 |
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    return "".join(sorted(word))
def anagram(my_word: str) -> list[str]:
    return word_by_signature[signature(my_word)]
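# Illustration (assumes 'ate', 'eat' and 'tea' all appear in words.txt -- an
# assumption about the word list shipped next to this script):
#     signature('tea')  -> 'aet'
#     anagram('tea')    -> ['ate', 'eat', 'tea']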
_A = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
_A = sorted({word.strip().lower() for word in data.splitlines()})
_A = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
_A = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
| 718 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_A = logging.getLogger(__name__)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> int:
__UpperCamelCase =False
def _a ( self , A_ , A_ , A_ , A_ ) -> List[Any]:
if not self.initialized:
__UpperCamelCase =RagRetriever(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =True
def _a ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def _a ( self , A_ , A_ ) -> Dict:
__UpperCamelCase , __UpperCamelCase =self.retriever._main_retrieve(A_ , A_ )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_=None ) -> Dict:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def _a ( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self , A_ , A_ ) -> Optional[int]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase =self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase =ray.get(random_worker.retrieve.remote(A_ , A_ ) )
else:
__UpperCamelCase , __UpperCamelCase =self._main_retrieve(A_ , A_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ )
@classmethod
def _a ( cls , A_ , A_=None , **A_ ) -> List[str]:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def _a ( cls , A_ , A_ , A_=None , **A_ ) -> str:
__UpperCamelCase =kwargs.pop('config' , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
__UpperCamelCase =RagTokenizer.from_pretrained(A_ , config=A_ )
__UpperCamelCase =rag_tokenizer.question_encoder
__UpperCamelCase =rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase ='custom'
__UpperCamelCase =CustomHFIndex(config.retrieval_vector_size , A_ )
else:
__UpperCamelCase =cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
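# Hedged usage sketch (mirrors the transformers RAG research example; the upstream
# names -- RayRetriever for the worker class above and RagRayDistributedRetriever for
# this class -- and the exact `from_pretrained` arguments are assumptions here):
#
#     import ray
#     ray.init()
#     workers = [ray.remote(RayRetriever).remote() for _ in range(4)]
#     retriever = RagRayDistributedRetriever.from_pretrained('facebook/rag-token-nq', workers)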
| 682 | 0 |
def least_divisible_repunit(divisor: int) -> int:
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index
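# Sanity check: R(6) = 111111 = 3 * 7 * 11 * 13 * 37, so A(7) = 6 and
# least_divisible_repunit(7) returns 6 (the running repunit first hits 0 there).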
def solution(limit: int = 1_000_000) -> int:
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
if __name__ == "__main__":
print(f"""{solution() = }""")
| 719 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__UpperCamelCase =(self.image_size // 32) ** 2
__UpperCamelCase =num_patches + 1
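        # Worked example: with image_size=64 and the backbone's output stride of 32,
        # the feature map is 2x2, so (64 // 32) ** 2 = 4 patches; prepending the
        # [CLS] token gives seq_length = 5.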
def _a ( self ) -> str:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.type_sequence_label_size
__UpperCamelCase =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' )
__UpperCamelCase =model(**A_ )
__UpperCamelCase =outputs.logits
# model predicts one of the 1000 ImageNet classes
__UpperCamelCase =logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
| 682 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_A = {
'configuration_bert': ['BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BertConfig', 'BertOnnxConfig'],
'tokenization_bert': ['BasicTokenizer', 'BertTokenizer', 'WordpieceTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['BertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BertForMaskedLM',
'BertForMultipleChoice',
'BertForNextSentencePrediction',
'BertForPreTraining',
'BertForQuestionAnswering',
'BertForSequenceClassification',
'BertForTokenClassification',
'BertLayer',
'BertLMHeadModel',
'BertModel',
'BertPreTrainedModel',
'load_tf_weights_in_bert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFBertEmbeddings',
'TFBertForMaskedLM',
'TFBertForMultipleChoice',
'TFBertForNextSentencePrediction',
'TFBertForPreTraining',
'TFBertForQuestionAnswering',
'TFBertForSequenceClassification',
'TFBertForTokenClassification',
'TFBertLMHeadModel',
'TFBertMainLayer',
'TFBertModel',
'TFBertPreTrainedModel',
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = ['TFBertTokenizer']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'FlaxBertForCausalLM',
'FlaxBertForMaskedLM',
'FlaxBertForMultipleChoice',
'FlaxBertForNextSentencePrediction',
'FlaxBertForPreTraining',
'FlaxBertForQuestionAnswering',
'FlaxBertForSequenceClassification',
'FlaxBertForTokenClassification',
'FlaxBertModel',
'FlaxBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 720 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : LevitConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ):
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__UpperCamelCase =timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 1_92:
__UpperCamelCase =timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 2_56:
__UpperCamelCase =timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 3_84:
__UpperCamelCase =timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE__ )
from_model.eval()
__UpperCamelCase =LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
    huggingface_weights = OrderedDict()
    weights = from_model.state_dict()
    og_keys = list(from_model.state_dict().keys())
    new_keys = list(our_model.state_dict().keys())
    print(len(og_keys), len(new_keys))
    for i in range(len(og_keys)):
        huggingface_weights[new_keys[i]] = weights[og_keys[i]]
    our_model.load_state_dict(huggingface_weights)
__UpperCamelCase =torch.randn((2, 3, 2_24, 2_24) )
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original one."
__UpperCamelCase =name
print(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__UpperCamelCase =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
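# The positional key-copy in convert_weight_and_push only works because both state
# dicts enumerate parameters in the same order. A toy sketch of the idea (tensor
# values and key names below are illustrative assumptions, not the real checkpoints):
#
#     src = {'a.weight': torch.ones(2), 'b.weight': torch.zeros(2)}
#     dst_keys = ['x.weight', 'y.weight']
#     remapped = {dst: src[og] for og, dst in zip(src, dst_keys)}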
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ):
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase ={
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__UpperCamelCase ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 682 | 0 |
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class UpperCAmelCase__ :
def __init__( self , A_ , A_=3 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> Optional[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =seq_length
__UpperCamelCase =is_training
__UpperCamelCase =use_input_mask
__UpperCamelCase =use_token_type_ids
__UpperCamelCase =use_labels
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =type_vocab_size
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =num_labels
__UpperCamelCase =num_choices
__UpperCamelCase =scope
def _a ( self ) -> Any:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self ) -> List[Any]:
return FalconConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=snake_case_ , )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
__UpperCamelCase =FalconModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__UpperCamelCase =model(snake_case_ , attention_mask=snake_case_ )
__UpperCamelCase =model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =True
__UpperCamelCase =FalconModel(snake_case_ )
model.to(snake_case_ )
model.eval()
__UpperCamelCase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , )
__UpperCamelCase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , )
__UpperCamelCase =model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =FalconForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__UpperCamelCase =model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Tuple:
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =FalconForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
# first forward pass
__UpperCamelCase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , use_cache=snake_case_ , )
__UpperCamelCase =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
__UpperCamelCase =ids_tensor((self.batch_size, 3) , config.vocab_size )
__UpperCamelCase =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
__UpperCamelCase =torch.cat([input_ids, next_tokens] , dim=-1 )
__UpperCamelCase =torch.cat([input_mask, next_mask] , dim=-1 )
__UpperCamelCase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , output_hidden_states=snake_case_ , )["hidden_states"][0]
__UpperCamelCase =model(
snake_case_ , attention_mask=snake_case_ , encoder_hidden_states=snake_case_ , encoder_attention_mask=snake_case_ , past_key_values=snake_case_ , output_hidden_states=snake_case_ , )["hidden_states"][0]
# select random slice
__UpperCamelCase =ids_tensor((1,) , output_from_past.shape[-1] ).item()
__UpperCamelCase =output_from_no_past[:, -3:, random_slice_idx].detach()
__UpperCamelCase =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1E-3 ) )
def _a ( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( _snake_case , _snake_case , _snake_case , unittest.TestCase ):
UpperCAmelCase__ : str = (
(
FalconModel,
FalconForCausalLM,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : Dict = (FalconForCausalLM,) if is_torch_available() else ()
UpperCAmelCase__ : str = (
{
"""feature-extraction""": FalconModel,
"""text-classification""": FalconForSequenceClassification,
"""text-generation""": FalconForCausalLM,
"""question-answering""": FalconForQuestionAnswering,
"""token-classification""": FalconForTokenClassification,
"""zero-shot""": FalconForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> int:
__UpperCamelCase =FalconModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=snake_case_ , hidden_size=37 )
def _a ( self ) -> Tuple:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
for alibi in [True, False]:
__UpperCamelCase =alibi
self.model_tester.create_and_check_model(snake_case_ , *snake_case_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =3
__UpperCamelCase =input_dict["input_ids"]
__UpperCamelCase =input_ids.ne(1 ).to(snake_case_ )
__UpperCamelCase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase =FalconForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__UpperCamelCase =model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =3
__UpperCamelCase ="single_label_classification"
__UpperCamelCase =input_dict["input_ids"]
__UpperCamelCase =input_ids.ne(1 ).to(snake_case_ )
__UpperCamelCase =ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
__UpperCamelCase =FalconForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__UpperCamelCase =model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =input_dict["input_ids"]
__UpperCamelCase =FalconForCausalLM(snake_case_ )
model.to(snake_case_ )
model.eval()
__UpperCamelCase =model(snake_case_ , use_cache=snake_case_ )
__UpperCamelCase =input_ids.shape[0]
__UpperCamelCase =model._convert_to_rw_cache(result.past_key_values )
__UpperCamelCase =model._convert_cache_to_standard_format(snake_case_ , snake_case_ )
for layer in range(len(snake_case_ ) ):
for tensor_idx in range(2 ):
self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 )
self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 )
self.assertTrue(
torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =3
__UpperCamelCase ="multi_label_classification"
__UpperCamelCase =input_dict["input_ids"]
__UpperCamelCase =input_ids.ne(1 ).to(snake_case_ )
__UpperCamelCase =ids_tensor(
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
__UpperCamelCase =FalconForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__UpperCamelCase =model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )
def _a ( self ) -> int:
# Falcon can have different numbers of KV-heads than the number of query heads, so we need
# to override this test to use the right head counts.
for model_class in self.all_generative_model_classes:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
# If it doesn't support cache, pass the test
if not hasattr(snake_case_ , 'use_cache' ):
return
__UpperCamelCase =model_class(snake_case_ ).to(snake_case_ )
if "use_cache" not in inputs:
__UpperCamelCase =True
__UpperCamelCase =model(**snake_case_ )
# If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
if "past_key_values" not in outputs:
return
__UpperCamelCase =(
getattr(snake_case_ , 'decoder_layers' , snake_case_ )
or getattr(snake_case_ , 'num_decoder_layers' , snake_case_ )
or config.num_hidden_layers
)
__UpperCamelCase =getattr(snake_case_ , 'num_kv_heads' , config.num_attention_heads )
__UpperCamelCase =getattr(snake_case_ , 'd_model' , config.hidden_size )
__UpperCamelCase =embed_dim // num_attention_heads
__UpperCamelCase =outputs["past_key_values"]
self.assertEqual(len(snake_case_ ) , snake_case_ )
__UpperCamelCase =inputs["input_ids"].shape
for i in range(snake_case_ ):
if config.new_decoder_architecture:
__UpperCamelCase =config.num_attention_heads
elif config.multi_query:
__UpperCamelCase =1
self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2
self.assertEqual(
past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
self.assertEqual(
past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) )
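        # Note on the branch above: with config.multi_query=True (and the old decoder
        # architecture) the cache collapses to a single shared KV head, so each cached
        # tensor is (batch, 1, seq_len, head_dim) rather than (batch, num_heads, seq_len, head_dim).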
@require_torch
class UpperCAmelCase__ ( unittest.TestCase ):
@slow
def _a ( self ) -> Dict:
__UpperCamelCase =AutoTokenizer.from_pretrained('Rocketknight1/falcon-rw-1b' )
__UpperCamelCase =FalconForCausalLM.from_pretrained('Rocketknight1/falcon-rw-1b' )
model.eval()
model.to(snake_case_ )
__UpperCamelCase =tokenizer('My favorite food is' , return_tensors='pt' ).to(snake_case_ )
__UpperCamelCase =(
"My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
)
__UpperCamelCase =model.generate(**snake_case_ , do_sample=snake_case_ , max_new_tokens=19 )
__UpperCamelCase =tokenizer.batch_decode(snake_case_ )[0]
self.assertEqual(snake_case_ , snake_case_ )
@slow
def _a ( self ) -> Dict:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
__UpperCamelCase =AutoTokenizer.from_pretrained(snake_case_ )
__UpperCamelCase =FalconForCausalLM.from_pretrained(snake_case_ )
model.eval()
model.to(snake_case_ )
__UpperCamelCase =tokenizer('My favorite food is' , return_tensors='pt' ).to(snake_case_ )
# We just test that these run without errors - the models are randomly initialized
# and so the actual text outputs will be garbage
model.generate(**snake_case_ , do_sample=snake_case_ , max_new_tokens=4 )
model.generate(**snake_case_ , do_sample=snake_case_ , max_new_tokens=4 )
model.generate(**snake_case_ , num_beams=2 , max_new_tokens=4 )
@slow
def _a ( self ) -> Any:
# The big models are way too big for the CI, so we use tiny random models that resemble their
# architectures but with much smaller and fewer layers
with torch.no_grad():
for repo in [
"Rocketknight1/falcon-rw-1b",
"Rocketknight1/tiny-random-falcon-7b",
"Rocketknight1/tiny-random-falcon-40b",
]:
__UpperCamelCase =AutoTokenizer.from_pretrained(snake_case_ )
__UpperCamelCase =FalconForCausalLM.from_pretrained(snake_case_ )
model.eval()
model.to(device=snake_case_ )
__UpperCamelCase =tokenizer('My favorite food is' , return_tensors='pt' ).to(snake_case_ )
# Test results are the same with and without cache
__UpperCamelCase =model.generate(**snake_case_ , do_sample=snake_case_ , max_new_tokens=20 , use_cache=snake_case_ )
__UpperCamelCase =model.generate(**snake_case_ , do_sample=snake_case_ , max_new_tokens=20 , use_cache=snake_case_ )
self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
| 721 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.checkpoint = 'laion/clap-htsat-unfused'
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer( self , **kwargs ):
        return RobertaTokenizer.from_pretrained(self.checkpoint , **kwargs )

    def get_feature_extractor( self , **kwargs ):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def test_save_load_pretrained_default( self ):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = ClapProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )

    def test_save_load_pretrained_additional_features( self ):
        processor = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False , padding_value=1.0 )
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , RobertaTokenizerFast )
        self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.feature_extractor , ClapFeatureExtractor )

    def test_feature_extractor( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        raw_speech = floats_list((3, 1000) )
        input_feat_extract = feature_extractor(raw_speech , return_tensors='np' )
        input_processor = processor(audios=raw_speech , return_tensors='np' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )

    def test_tokenizer( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        input_str = 'This is a test string'
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )

    def test_tokenizer_decode( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )

    def test_model_input_names( self ):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names[2:] , feature_extractor.model_input_names ,
            msg='`processor` and `feature_extractor` model input names do not match' , )
| 682 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_chinese_clip'] = ['ChineseCLIPFeatureExtractor']
    _import_structure['image_processing_chinese_clip'] = ['ChineseCLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_chinese_clip'] = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('test' )
    else:
        parser = argparse.ArgumentParser('Accelerate test command' )
    parser.add_argument(
        '--config_file' , default=None , help=(
            'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
            'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
            'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
            'with \'huggingface\'.'
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser


def test_command( args ):
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f'--config_file={args.config_file} {script_name}'
    cmd = ['accelerate-launch'] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('Test is a success! You are ready for your distributed training!' )


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
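# Usage sketch (wiring is illustrative): how `test_command_parser` plugs the
# `test` subcommand into a top-level argparse CLI, as the real `accelerate`
# entry point does.
def build_cli():
    root = argparse.ArgumentParser('accelerate')
    subcommands = root.add_subparsers()
    test_command_parser(subcommands)
    return root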
if __name__ == "__main__":
main()
| 682 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/mbart-large-en-ro': 1024,
'facebook/mbart-large-cc25': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizer( PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , A_ , A_="<s>" , A_="</s>" , A_="</s>" , A_="<s>" , A_="<unk>" , A_="<pad>" , A_="<mask>" , A_=None , A_=None , A_=None , A_ = None , A_=None , **A_ , ) -> Union[str, Any]:
# Mask token behave like a normal word, i.e. include the space before it
__UpperCamelCase =AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , tokenizer_file=_lowerCamelCase , src_lang=_lowerCamelCase , tgt_lang=_lowerCamelCase , additional_special_tokens=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
__UpperCamelCase =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
__UpperCamelCase ={'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
__UpperCamelCase =1
__UpperCamelCase =len(self.sp_model )
__UpperCamelCase ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_lowerCamelCase )
}
__UpperCamelCase ={v: k for k, v in self.lang_code_to_id.items()}
__UpperCamelCase =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
__UpperCamelCase ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
__UpperCamelCase =list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
__UpperCamelCase =src_lang if src_lang is not None else 'en_XX'
__UpperCamelCase =self.lang_code_to_id[self._src_lang]
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ) -> Optional[Any]:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
__UpperCamelCase =self.sp_model.serialized_model_proto()
return state
def __setstate__( self , A_ ) -> Tuple:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _a ( self ) -> List[Any]:
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _a ( self ) -> Optional[Any]:
return self._src_lang
@src_lang.setter
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , A_ , A_ = None , A_ = False ) -> List[Any]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_lowerCamelCase )) + suffix_ones
return prefix_ones + ([0] * len(_lowerCamelCase )) + ([0] * len(_lowerCamelCase )) + suffix_ones
def _a ( self , A_ , A_ = None ) -> List[Any]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self , A_ , A_ = None ) -> List[str]:
__UpperCamelCase =[self.sep_token_id]
__UpperCamelCase =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self , A_ , A_ , A_ , A_ , **A_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
__UpperCamelCase =self(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_tensors=_lowerCamelCase , **_lowerCamelCase )
__UpperCamelCase =self.convert_tokens_to_ids(_lowerCamelCase )
__UpperCamelCase =tgt_lang_id
return inputs
def _a ( self ) -> List[Any]:
__UpperCamelCase ={self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self , A_ ) -> Tuple:
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def _a ( self , A_ ) -> Any:
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__UpperCamelCase =self.sp_model.PieceToId(_lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _a ( self , A_ ) -> Optional[int]:
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _a ( self , A_ ) -> int:
__UpperCamelCase =''.join(_lowerCamelCase ).replace(_lowerCamelCase , ' ' ).strip()
return out_string
def _a ( self , A_ , A_ = None ) -> str:
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
__UpperCamelCase =os.path.join(
_lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
def _a ( self , A_ , A_ = "en_XX" , A_ = None , A_ = "ro_RO" , **A_ , ) -> List[str]:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
return super().prepare_seqaseq_batch(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
def _a ( self ) -> Tuple:
return self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> List[str]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , A_ ) -> int:
__UpperCamelCase =self.lang_code_to_id[src_lang]
__UpperCamelCase =[]
__UpperCamelCase =[self.eos_token_id, self.cur_lang_code]
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =self.lang_code_to_id[lang]
__UpperCamelCase =[]
__UpperCamelCase =[self.eos_token_id, self.cur_lang_code]
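# Standalone sketch of the id alignment documented in __init__ above: fairseq
# reserves ids 0-3 for <s>/<pad>/</s>/<unk>, sentencepiece reserves 0-2, so
# every real spm piece shifts by a fixed offset of 1.
_fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
_fairseq_offset = 1

def _spm_id_to_fairseq_id(spm_id: int) -> int:
    # spm id 0 is <unk>; the first real piece (spm id 3, ",") maps to fairseq id 4
    return spm_id + _fairseq_offset if spm_id else _fairseq_tokens_to_ids['<unk>']

assert _spm_id_to_fairseq_id(3) == 4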
| 701 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param( t5x_checkpoint_path ):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    flax_params = flatten_dict(flax_params )
    return flax_params
def rename_and_convert_flax_params( flax_dict ):
    converted_dict = {}
    CONVERSION_MAPPING = {
        'token_embedder': 'embeddings',
        'encoder_norm': 'layernorm',
        'kernel': 'weight',
        '.out': '.output',
        'scale': 'weight',
        'embedders_0.pos_embedding': 'row_embedder.weight',
        'embedders_1.pos_embedding': 'column_embedder.weight',
    }
    DECODER_CONVERSION_MAPPING = {
        'query': 'attention.query',
        'key': 'attention.key',
        'value': 'attention.value',
        'output.dense': 'output',
        'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
        'pre_self_attention_layer_norm': 'self_attention.layer_norm',
        'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
        'mlp.': 'mlp.DenseReluDense.',
        'pre_mlp_layer_norm': 'mlp.layer_norm',
        'self_attention.o': 'self_attention.attention.o',
        'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
        'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
        'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
        'decoder.logits_dense.weight': 'decoder.lm_head.weight',
    }
    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = '.'.join(key[1:] )
            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old , new )
            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old , new )
            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)' , r'layer.\1' , new_key )
                new_key = new_key.replace('encoder' , 'encoder.encoder' )
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r'layers_(\d+)' , r'layer.\1' , new_key )
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
    # convert converted_dict into torch format: transpose all weights except embedding tables
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T )
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key] )
    return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf( t5x_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
    flax_params = get_flax_param(t5x_checkpoint_path )
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536 , d_ff=3968 , num_attention_heads=24 , num_hidden_layers=18 )
        decoder_config = Pix2StructTextConfig(hidden_size=1536 , d_ff=3968 , num_heads=24 , num_layers=18 )
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model = Pix2StructForConditionalGeneration(config )
    torch_params = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(torch_params )
    tokenizer = AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print('Model saved in {}'.format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Whether the checkpoint is a VQA model.')
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large, args.is_vqa
    )
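# Sketch of the flax->torch convention applied in `rename_and_convert_flax_params`:
# flax Dense kernels are stored (in_features, out_features), torch nn.Linear expects
# (out_features, in_features), so everything except embedding tables is transposed.
# Shapes below are illustrative.
import numpy as np

_flax_kernel = np.zeros((768, 3072), dtype=np.float32)  # (in, out)
_torch_weight = torch.from_numpy(_flax_kernel.T)        # (out, in)
assert _torch_weight.shape == (3072, 768)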
| 682 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument('-f' )
    args = parser.parse_args()
    return args.f
def get_results( output_dir ):
    results = {}
    path = os.path.join(output_dir , 'all_results.json' )
    if os.path.exists(path ):
        with open(path , 'r' ) as f:
            results = json.load(f )
    else:
        raise ValueError(f'can\'t find {path}' )
    return results
def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == 'cuda'
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer( TestCasePlus ):
"""simple docstring"""
@classmethod
    def setUpClass( cls ):
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , 'default_config.yml' )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
    def tearDownClass( cls ):
        shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _a ( self ) -> Any:
__UpperCamelCase =self.get_auto_remove_tmp_dir()
__UpperCamelCase =f'\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__UpperCamelCase =get_results(__lowerCamelCase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'glue_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_auto_remove_tmp_dir()
__UpperCamelCase =f'\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n '.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
__UpperCamelCase =get_results(__lowerCamelCase )
self.assertLess(result['perplexity'] , 100 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'clm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _a ( self ) -> int:
__UpperCamelCase =self.get_auto_remove_tmp_dir()
__UpperCamelCase =f'\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
__UpperCamelCase =get_results(__lowerCamelCase )
self.assertLess(result['perplexity'] , 42 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'mlm_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _a ( self ) -> List[str]:
__UpperCamelCase =7 if get_gpu_count() > 1 else 2
__UpperCamelCase =self.get_auto_remove_tmp_dir()
__UpperCamelCase =f'\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
__UpperCamelCase =get_results(__lowerCamelCase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
self.assertLess(result['train_loss'] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'ner_no_trainer' ) ) )
@unittest.skip(reason='Fix me @muellerzr' )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.get_auto_remove_tmp_dir()
__UpperCamelCase =f'\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
__UpperCamelCase =get_results(__lowerCamelCase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['eval_f1'] , 28 )
self.assertGreaterEqual(result['eval_exact'] , 28 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'qa_no_trainer' ) ) )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _a ( self ) -> str:
__UpperCamelCase =self.get_auto_remove_tmp_dir()
__UpperCamelCase =f'\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
__UpperCamelCase =get_results(__lowerCamelCase )
self.assertGreaterEqual(result['eval_accuracy'] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'swag_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_auto_remove_tmp_dir()
__UpperCamelCase =f'\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
__UpperCamelCase =get_results(__lowerCamelCase )
self.assertGreaterEqual(result['eval_rouge1'] , 10 )
self.assertGreaterEqual(result['eval_rouge2'] , 2 )
self.assertGreaterEqual(result['eval_rougeL'] , 7 )
self.assertGreaterEqual(result['eval_rougeLsum'] , 7 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'summarization_no_trainer' ) ) )
@slow
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_auto_remove_tmp_dir()
__UpperCamelCase =f'\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n '.split()
run_command(self._launch_args + testargs )
__UpperCamelCase =get_results(__lowerCamelCase )
self.assertGreaterEqual(result['eval_bleu'] , 30 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'epoch_0' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'translation_no_trainer' ) ) )
@slow
def _a ( self ) -> Dict:
__UpperCamelCase =logging.StreamHandler(sys.stdout )
logger.addHandler(__lowerCamelCase )
__UpperCamelCase =self.get_auto_remove_tmp_dir()
__UpperCamelCase =f'\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n '.split()
run_command(self._launch_args + testargs )
__UpperCamelCase =get_results(__lowerCamelCase )
self.assertGreaterEqual(result['eval_overall_accuracy'] , 0.10 )
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def _a ( self ) -> int:
__UpperCamelCase =self.get_auto_remove_tmp_dir()
__UpperCamelCase =f'\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n '.split()
if is_cuda_and_apex_available():
testargs.append('--fp16' )
run_command(self._launch_args + testargs )
__UpperCamelCase =get_results(__lowerCamelCase )
# The base model scores a 25%
self.assertGreaterEqual(result['eval_accuracy'] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'step_1' ) ) )
self.assertTrue(os.path.exists(os.path.join(__lowerCamelCase , 'image_classification_no_trainer' ) ) )
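# Hedged sketch of the metrics round-trip the assertions above rely on: each
# example script writes its final metrics to `all_results.json`, which
# `get_results` reads back (the value below is illustrative).
def _demo_get_results_roundtrip():
    with tempfile.TemporaryDirectory() as out_dir:
        with open(os.path.join(out_dir, 'all_results.json'), 'w') as f:
            json.dump({'eval_accuracy': 0.82}, f)
        assert get_results(out_dir)['eval_accuracy'] >= 0.75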
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_trocr'] = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 682 | 0 |
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_A = logging.getLogger(__name__)
@dataclass
class Seq2SeqTrainingArguments( TrainingArguments ):
    """simple docstring"""

    label_smoothing : Optional[float] = field(
        default=0.0 , metadata={"help": "The label smoothing epsilon to apply (if not zero)."} )
    sortish_sampler : bool = field(default=False , metadata={"help": "Whether to SortishSamler or not."} )
    predict_with_generate : bool = field(
        default=False , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    adafactor : bool = field(default=False , metadata={"help": "whether to use adafactor"} )
    encoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Encoder layer dropout probability. Goes into model.config."} )
    decoder_layerdrop : Optional[float] = field(
        default=None , metadata={"help": "Decoder layer dropout probability. Goes into model.config."} )
    dropout : Optional[float] = field(default=None , metadata={"help": "Dropout probability. Goes into model.config."} )
    attention_dropout : Optional[float] = field(
        default=None , metadata={"help": "Attention dropout probability. Goes into model.config."} )
    lr_scheduler : Optional[str] = field(
        default="linear" , metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}"} , )
| 703 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        # NOTE: the tester hardcodes its configuration and ignores the constructor arguments
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_lm_head( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        prediction_scores = model(inputs )['logits']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForMaskedLM(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFRoFormerForQuestionAnswering(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFRoFormerModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": TFRoFormerModel,
"fill-mask": TFRoFormerForMaskedLM,
"question-answering": TFRoFormerForQuestionAnswering,
"text-classification": TFRoFormerForSequenceClassification,
"text-generation": TFRoFormerForCausalLM,
"token-classification": TFRoFormerForTokenClassification,
"zero-shot": TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def setUp( self ):
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_causal_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_masked_lm( self ):
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.1205_3341, -1.026_4901, 0.2922_1946],
                    [-1.513_3783, 0.19_7433, 0.1519_0607],
                    [-5.013_5403, -3.90_0256, -0.8403_8764],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest( unittest.TestCase ):
"""simple docstring"""
    tolerance = 1e-4
    def test_basic( self ):
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb , desired_weights , atol=self.tolerance )
    def test_positional_emb_weights_against_roformer( self ):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest( unittest.TestCase ):
"""simple docstring"""
    tolerance = 1e-4
    def test_apply_rotary_position_embeddings( self ):
        # query/key tensors of shape (batch=2, num_heads=12, seq_len=16, head_dim=64)
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 768] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
        expected_query_layer = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
        expected_key_layer = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , expected_query_layer , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , expected_key_layer , atol=self.tolerance )
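# Hedged numpy sketch of the rotary scheme the test above verifies: the
# sinusoidal features (sin half then cos half on the last axis) rotate each
# (even, odd) channel pair of the query/key tensors, so dot products depend
# only on relative position. This mirrors the RoFormer formulation; it is not
# the library implementation itself.
import numpy as np

def apply_rotary_np(x: np.ndarray, sinusoidal_pos: np.ndarray) -> np.ndarray:
    half = sinusoidal_pos.shape[-1] // 2
    sin, cos = sinusoidal_pos[..., :half], sinusoidal_pos[..., half:]
    sin_pos = np.repeat(sin, 2, axis=-1)  # [s0, s0, s1, s1, ...]
    cos_pos = np.repeat(cos, 2, axis=-1)
    # rotate each (even, odd) channel pair: (x0, x1) -> (-x1, x0)
    rotate_half = np.stack([-x[..., 1::2], x[..., 0::2]], axis=-1).reshape(x.shape)
    return x * cos_pos + rotate_half * sin_pos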
| 682 | 0 |
from __future__ import annotations
def generate_sum_of_subsets_soln( nums: list[int] , max_sum: int ) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums )
    create_state_space_tree(nums , max_sum , num_index , path , result , remaining_nums_sum )
    return result


def create_state_space_tree( nums , max_sum , num_index , path , result , remaining_nums_sum , ):
    # prune: the current path already overshoots, or even taking every remaining number cannot reach max_sum
    if sum(path ) > max_sum or (remaining_nums_sum + sum(path )) < max_sum:
        return
    if sum(path ) == max_sum:
        result.append(path )
        return
    for index in range(num_index , len(nums ) ):
        create_state_space_tree(
            nums , max_sum , index + 1 , [*path, nums[index]] , result , remaining_nums_sum - nums[index] , )
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
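# Quick sanity check (illustrative): every reported subset must sum to max_sum.
assert all(sum(subset) == max_sum for subset in result)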
| 704 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """simple docstring"""

    def __init__( self , components = None ) -> None:
        if components is None:
            components = []
        self.__components = list(components )

    def __len__( self ) -> int:
        return len(self.__components )

    def __str__( self ) -> str:
        return "(" + ",".join(map(str , self.__components ) ) + ")"

    def __add__( self , other ) -> Vector:
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception('must have the same size' )

    def __sub__( self , other ) -> Vector:
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else:  # error case
            raise Exception('must have the same size' )

    @overload
    def __mul__( self , other: float ) -> Vector:
        ...

    @overload
    def __mul__( self , other: Vector ) -> float:
        ...

    def __mul__( self , other ) -> float | Vector:
        if isinstance(other , (float, int) ):
            ans = [c * other for c in self.__components]
            return Vector(ans )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            summands = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(summands )
        else:  # error case
            raise Exception('invalid operand!' )

    def copy( self ) -> Vector:
        return Vector(self.__components )

    def component( self , i ) -> float:
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception('index out of range' )

    def change_component( self , pos , value ) -> None:
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value

    def euclidean_length( self ) -> float:
        if len(self.__components ) == 0:
            raise Exception('Vector is empty' )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )

    def angle( self , other , deg = False ) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )


def zero_vector( dimension ):
    assert isinstance(dimension , int )
    return Vector([0] * dimension )


def unit_basis_vector( dimension , pos ):
    assert isinstance(dimension , int ) and (isinstance(pos , int ))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans )


def axpy( scalar , x , y ):
    assert (
        isinstance(x , Vector )
        and isinstance(y , Vector )
        and (isinstance(scalar , (int, float) ))
    )
    return x * scalar + y


def random_vector( n , a , b ):
    random.seed(None )
    ans = [random.randint(a , b ) for _ in range(n )]
    return Vector(ans )


class Matrix:
    """simple docstring"""

    def __init__( self , matrix , w , h ) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__( self ) -> str:
        ans = ''
        for i in range(self.__height ):
            ans += "|"
            for j in range(self.__width ):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j] ) + ","
                else:
                    ans += str(self.__matrix[i][j] ) + "|\n"
        return ans

    def __add__( self , other ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrix must have the same dimension!' )

    def __sub__( self , other ) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
        else:
            raise Exception('matrices must have the same dimension!' )

    @overload
    def __mul__( self , other: float ) -> Matrix:
        ...

    @overload
    def __mul__( self , other: Vector ) -> Vector:
        ...

    def __mul__( self , other ) -> Vector | Matrix:
        if isinstance(other , Vector ):  # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    summands = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(summands ) )
                return ans
            else:
                raise Exception(
                    'vector must have the same size as the '
                    'number of columns of the matrix!' )
        elif isinstance(other , (int, float) ):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None

    def height( self ) -> int:
        return self.__height

    def width( self ) -> int:
        return self.__width

    def component( self , x , y ) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception('component: indices out of bounds' )

    def change_component( self , x , y , value ) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception('change_component: indices out of bounds' )

    def minor( self , x , y ) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()

    def cofactor( self , x , y ) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception('Indices out of bounds' )

    def determinant( self ) -> float:
        if self.__height != self.__width:
            raise Exception('Matrix is not square' )
        if self.__height < 1:
            raise Exception('Matrix has no element' )
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )


def square_zero_matrix( n ):
    ans = [[0] * n for _ in range(n )]
    return Matrix(ans , n , n )


def random_matrix( width , height , a , b ):
    random.seed(None )
    matrix = [
        [random.randint(a , b ) for _ in range(width )] for _ in range(height )
    ]
    return Matrix(matrix , width , height )
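# Usage sketch of the cofactor-expansion determinant above: a diagonal matrix
# multiplies its diagonal entries, and a matrix with a repeated row is singular.
_diag = Matrix([[2, 0], [0, 3]], 2, 2)
assert _diag.determinant() == 6
_singular = Matrix([[1, 2, 3], [1, 2, 3], [0, 1, 4]], 3, 3)
assert _singular.determinant() == 0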
| 682 | 0 |
'''simple docstring'''
from PIL import Image
def mean_threshold( image: Image ) -> Image:
    # two passes over a grayscale image: accumulate the global mean, then binarize against it
    width , height = image.size
    mean = 0
    pixels = image.load()
    for x in range(width ):
        for y in range(height ):
            pixel = pixels[x, y]
            mean += pixel
    mean //= width * height
    for x in range(width ):
        for y in range(height ):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image
if __name__ == "__main__":
    image = mean_threshold(Image.open('path_to_image').convert('L'))
image.save('output_image_path')
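# Equivalent vectorized sketch (assuming numpy is available) of the two-pass
# threshold above: compute the global mean once, then binarize in one shot.
import numpy as np

def mean_threshold_np(image: Image) -> Image:
    arr = np.asarray(image, dtype=np.uint8)
    binary = np.where(arr > arr.mean(), 255, 0).astype(np.uint8)
    return Image.fromarray(binary)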
| 705 |
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/m2m100_418M': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
class M2M100Tokenizer( PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , A_ , A_ , A_=None , A_=None , A_="<s>" , A_="</s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="m2m100" , A_ = None , A_=8 , **A_ , ) -> None:
__UpperCamelCase ={} if sp_model_kwargs is None else sp_model_kwargs
__UpperCamelCase =language_codes
__UpperCamelCase =FAIRSEQ_LANGUAGE_CODES[language_codes]
__UpperCamelCase ={lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
__UpperCamelCase =kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A_ )
for lang_code in fairseq_language_code
if self.get_lang_token(A_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A_ , tgt_lang=A_ , bos_token=A_ , eos_token=A_ , sep_token=A_ , unk_token=A_ , pad_token=A_ , language_codes=A_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A_ , **A_ , )
__UpperCamelCase =vocab_file
__UpperCamelCase =load_json(A_ )
__UpperCamelCase ={v: k for k, v in self.encoder.items()}
__UpperCamelCase =spm_file
__UpperCamelCase =load_spm(A_ , self.sp_model_kwargs )
__UpperCamelCase =len(self.encoder )
__UpperCamelCase ={
self.get_lang_token(A_ ): self.encoder_size + i for i, lang_code in enumerate(A_ )
}
__UpperCamelCase ={lang_code: self.encoder_size + i for i, lang_code in enumerate(A_ )}
__UpperCamelCase ={v: k for k, v in self.lang_token_to_id.items()}
__UpperCamelCase =src_lang if src_lang is not None else 'en'
__UpperCamelCase =tgt_lang
__UpperCamelCase =self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
__UpperCamelCase =num_madeup_words
@property
def _a ( self ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _a ( self ) -> str:
return self._src_lang
@src_lang.setter
def _a ( self , A_ ) -> None:
__UpperCamelCase =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _a ( self , A_ ) -> List[str]:
return self.sp_model.encode(A_ , out_type=str )
def _a ( self , A_ ) -> Optional[Any]:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A_ , self.encoder[self.unk_token] )
def _a ( self , A_ ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A_ , self.unk_token )
def _a ( self , A_ ) -> List[Any]:
__UpperCamelCase =[]
__UpperCamelCase =''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(current_sub_tokens ) + token
__UpperCamelCase =[]
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(current_sub_tokens )
return out_string.strip()
def _a ( self , A_ , A_ = None , A_ = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
__UpperCamelCase =[1] * len(self.prefix_tokens )
__UpperCamelCase =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A_ )) + suffix_ones
return prefix_ones + ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _a ( self , A_ , A_ = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _a ( self ) -> Dict:
__UpperCamelCase ={self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Dict:
__UpperCamelCase =self.__dict__.copy()
__UpperCamelCase =None
return state
def __setstate__( self , A_ ) -> None:
__UpperCamelCase =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__UpperCamelCase ={}
__UpperCamelCase =load_spm(self.spm_file , self.sp_model_kwargs )
def _a ( self , A_ , A_ = None ) -> Tuple[str]:
__UpperCamelCase =Path(A_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__UpperCamelCase =save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , A_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(A_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A_ )
elif not os.path.isfile(self.spm_file ):
with open(A_ , 'wb' ) as fi:
__UpperCamelCase =self.sp_model.serialized_model_proto()
fi.write(A_ )
return (str(A_ ), str(A_ ))
def _a ( self , A_ , A_ = "en" , A_ = None , A_ = "ro" , **A_ , ) -> BatchEncoding:
__UpperCamelCase =src_lang
__UpperCamelCase =tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seq2seq_batch(A_ , A_ , **A_ )
def _a ( self , A_ , A_ , A_ , **A_ ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
__UpperCamelCase =src_lang
__UpperCamelCase =self(A_ , add_special_tokens=True , **A_ )
__UpperCamelCase =self.get_lang_id(A_ )
__UpperCamelCase =tgt_lang_id
return inputs
def _a ( self ) -> List[Any]:
self.set_src_lang_special_tokens(self.src_lang )
def _a ( self ) -> Dict:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> None:
__UpperCamelCase =self.get_lang_token(A_ )
__UpperCamelCase =self.lang_token_to_id[lang_token]
__UpperCamelCase =[self.cur_lang_id]
__UpperCamelCase =[self.eos_token_id]
def _a ( self , A_ ) -> str:
return self.lang_code_to_token[lang]
def _a ( self , A_ ) -> int:
__UpperCamelCase =self.get_lang_token(A_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Dict[str, Any] ):
__UpperCamelCase =sentencepiece.SentencePieceProcessor(**SCREAMING_SNAKE_CASE__ )
spm.Load(str(SCREAMING_SNAKE_CASE__ ) )
return spm
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'r' ) as f:
return json.load(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ):
with open(SCREAMING_SNAKE_CASE__ , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , indent=2 )
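# A minimal, self-contained sketch of the wrapping that set_src_lang_special_tokens
# and build_inputs_with_special_tokens perform above: [__src_lang__] + tokens + [</s>].
# The ids below are invented for illustration; real ids come from vocab.json plus
# the __lang__ tokens appended after the base vocabulary.
def wrap_with_lang_tokens(token_ids, lang_token_id, eos_token_id):
    # prefix_tokens = [lang_token_id], suffix_tokens = [eos_token_id]
    return [lang_token_id] + list(token_ids) + [eos_token_id]

assert wrap_with_lang_tokens([17, 42], 128022, 2) == [128022, 17, 42, 2]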
| 682 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int = 3 , SCREAMING_SNAKE_CASE__ : int = 7 , SCREAMING_SNAKE_CASE__ : int = 1_00_00_00 ):
__UpperCamelCase =0
__UpperCamelCase =1
for current_denominator in range(1 , limit + 1 ):
__UpperCamelCase =current_denominator * numerator // denominator
if current_denominator % denominator == 0:
current_numerator -= 1
if current_numerator * max_denominator > current_denominator * max_numerator:
__UpperCamelCase =current_numerator
__UpperCamelCase =current_denominator
return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
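# A transparent cross-check of the search above using exact fractions: for the
# default 3/7 and a small limit the brute force finishes quickly and must agree
# with solution(). (428/999 is the left Stern-Brocot neighbour of 3/7 for limit=1000.)
from fractions import Fraction

def left_neighbor_brute(numerator=3, denominator=7, limit=1000):
    target = Fraction(numerator, denominator)
    best = Fraction(0, 1)
    for b in range(1, limit + 1):
        a = b * numerator // denominator
        if Fraction(a, b) == target:
            a -= 1
        best = max(best, Fraction(a, b))
    return best.numerator

assert left_neighbor_brute() == 428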
| 706 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =original_name.split('.' )[0]
__UpperCamelCase =key.split('.' )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 2] )
__UpperCamelCase =int(key_list[key_list.index(SCREAMING_SNAKE_CASE__ ) - 1] )
__UpperCamelCase =orig_block_num - offset
__UpperCamelCase =key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}' )
return key
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =OrderedDict()
__UpperCamelCase , __UpperCamelCase =0, 0
for key, value in state_dict.items():
if key.startswith('network' ):
__UpperCamelCase =key.replace('network' , 'poolformer.encoder' )
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith('bias' ) and "patch_embed" not in key:
patch_emb_offset += 1
__UpperCamelCase =key[: key.find('proj' )]
__UpperCamelCase =key.replace(SCREAMING_SNAKE_CASE__ , F'patch_embeddings.{total_embed_found}.' )
__UpperCamelCase =key.replace('proj' , 'projection' )
if key.endswith('bias' ):
total_embed_found += 1
if "patch_embeddings" in key:
__UpperCamelCase ='poolformer.encoder.' + key
if "mlp.fc1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc1' , 'output.conv1' )
if "mlp.fc2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'mlp.fc2' , 'output.conv2' )
if "norm1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm1' , 'before_norm' )
if "norm2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'norm2' , 'after_norm' )
if "layer_scale_1" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_1' , 'layer_scale_1' )
if "layer_scale_2" in key:
__UpperCamelCase =replace_key_with_offset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , 'layer_scale_2' , 'layer_scale_2' )
if "head" in key:
__UpperCamelCase =key.replace('head' , 'classifier' )
__UpperCamelCase =value
return new_state_dict
def _UpperCAmelCase ( ):
__UpperCamelCase ='http://images.cocodataset.org/val2017/000000039769.jpg'
__UpperCamelCase =Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return image
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : str ):
__UpperCamelCase =PoolFormerConfig()
# set attributes based on model_name
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =model_name[-3:]
__UpperCamelCase =10_00
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =(1, 10_00)
# set config attributes
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(k ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
if size == "s12":
__UpperCamelCase =[2, 2, 6, 2]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s24":
__UpperCamelCase =[4, 4, 12, 4]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =0.9
elif size == "s36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[64, 1_28, 3_20, 5_12]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.9
elif size == "m36":
__UpperCamelCase =[6, 6, 18, 6]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
elif size == "m48":
__UpperCamelCase =[8, 8, 24, 8]
__UpperCamelCase =[96, 1_92, 3_84, 7_68]
__UpperCamelCase =4.0
__UpperCamelCase =1E-6
__UpperCamelCase =0.95
else:
raise ValueError(F'Size {size} not supported' )
# load image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
# Prepare image
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
__UpperCamelCase =torch.load(SCREAMING_SNAKE_CASE__ , map_location=torch.device('cpu' ) )
# rename keys
__UpperCamelCase =rename_keys(SCREAMING_SNAKE_CASE__ )
# create HuggingFace model and load state dict
__UpperCamelCase =PoolFormerForImageClassification(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
model.eval()
# Define image processor
__UpperCamelCase =PoolFormerImageProcessor(crop_pct=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =image_processor(images=prepare_img() , return_tensors='pt' ).pixel_values
# forward pass
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =outputs.logits
# define expected logit slices for different models
if size == "s12":
__UpperCamelCase =torch.tensor([-0.3045, -0.6758, -0.4869] )
elif size == "s24":
__UpperCamelCase =torch.tensor([0.4402, -0.1374, -0.8045] )
elif size == "s36":
__UpperCamelCase =torch.tensor([-0.6080, -0.5133, -0.5898] )
elif size == "m36":
__UpperCamelCase =torch.tensor([0.3952, 0.2263, -1.2668] )
elif size == "m48":
__UpperCamelCase =torch.tensor([0.1167, -0.0656, -0.3423] )
else:
raise ValueError(F'Size {size} not supported' )
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_A = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
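# Self-contained sketch of the block-offset renaming idea used by
# replace_key_with_offset above: the original checkpoint numbers blocks with a
# running global counter, the HF model restarts the count, so the block index
# is shifted by an offset. The key names here are fabricated for illustration.
import re

def shift_block_index(key, offset, old_name, new_name):
    match = re.match(r"network\.(\d+)\.(\d+)\.(.*)", key)
    block, layer, rest = int(match.group(1)), match.group(2), match.group(3)
    return f"block.{block - offset}.{layer}.{rest.replace(old_name, new_name)}"

assert (shift_block_index("network.2.3.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
        == "block.1.3.output.conv1.weight")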
| 682 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Any = "rwkv"
UpperCAmelCase__ : Union[str, Any] = {"max_position_embeddings": "context_length"}
def __init__( self , A_=50277 , A_=1024 , A_=4096 , A_=32 , A_=None , A_=None , A_=1E-5 , A_=0 , A_=0 , A_=6 , A_=False , A_=True , **A_ , ) -> Optional[int]:
__UpperCamelCase =vocab_size
__UpperCamelCase =context_length
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =attention_hidden_size if attention_hidden_size is not None else hidden_size
__UpperCamelCase =intermediate_size if intermediate_size is not None else 4 * hidden_size
__UpperCamelCase =layer_norm_epsilon
__UpperCamelCase =rescale_every
__UpperCamelCase =use_cache
__UpperCamelCase =bos_token_id
__UpperCamelCase =eos_token_id
super().__init__(
tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
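# The __init__ above derives two sizes when they are not passed explicitly.
# A standalone check of that rule (field names mirror the real RwkvConfig):
def derive_rwkv_sizes(hidden_size, attention_hidden_size=None, intermediate_size=None):
    attention = attention_hidden_size if attention_hidden_size is not None else hidden_size
    intermediate = intermediate_size if intermediate_size is not None else 4 * hidden_size
    return attention, intermediate

assert derive_rwkv_sizes(1024) == (1024, 4096)
assert derive_rwkv_sizes(1024, attention_hidden_size=512) == (512, 4096)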
| 707 |
from math import asin, atan, cos, radians, sin, sqrt, tan
_A = 6_378_137.0
_A = 6_356_752.314_245
_A = 637_8137
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
__UpperCamelCase =(AXIS_A - AXIS_B) / AXIS_A
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE__ ) ) )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =radians(SCREAMING_SNAKE_CASE__ )
# Equation
__UpperCamelCase =sin((phi_a - phi_a) / 2 )
__UpperCamelCase =sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__UpperCamelCase =sqrt(sin_sq_phi + (cos(SCREAMING_SNAKE_CASE__ ) * cos(SCREAMING_SNAKE_CASE__ ) * sin_sq_lambda) )
return 2 * RADIUS * asin(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
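# Cross-check with the textbook spherical haversine (no flattening correction);
# it should agree with the ellipsoid-corrected function above to within roughly
# half a percent. The sample coordinates are approximately San Francisco and
# Yosemite, about 254 km apart.
from math import asin, cos, radians, sin, sqrt

def spherical_haversine(lat1, lon1, lat2, lon2, radius=6_371_000.0):
    phi_1, phi_2 = radians(lat1), radians(lat2)
    d_phi, d_lambda = radians(lat2 - lat1), radians(lon2 - lon1)
    h = sin(d_phi / 2) ** 2 + cos(phi_1) * cos(phi_2) * sin(d_lambda / 2) ** 2
    return 2 * radius * asin(sqrt(h))

print(spherical_haversine(37.774856, -122.424227, 37.864742, -119.537521))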
| 682 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_A = version.parse(importlib_metadata.version('nltk'))
if NLTK_VERSION >= version.Version('3.6.4'):
from nltk import word_tokenize
_A = '\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n'
_A = '\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n'
_A = '\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n \'meteor\': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric(\'meteor\')\n >>> predictions = ["It is a guide to action which ensures that the military always obeys the commands of the party"]\n >>> references = ["It is a guide to action that ensures that the military will forever heed Party commands"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results["meteor"], 4))\n 0.6944\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase__ ( datasets.Metric ):
"""simple docstring"""
def _a ( self ) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def _a ( self , A_ ) -> Any:
import nltk
nltk.download('wordnet' )
if NLTK_VERSION >= version.Version('3.6.5' ):
nltk.download('punkt' )
if NLTK_VERSION >= version.Version('3.6.6' ):
nltk.download('omw-1.4' )
def _a ( self , A_ , A_ , A_=0.9 , A_=3 , A_=0.5 ) -> Optional[int]:
if NLTK_VERSION >= version.Version('3.6.5' ):
__UpperCamelCase =[
meteor_score.single_meteor_score(
word_tokenize(ref ) , word_tokenize(pred ) , alpha=A_ , beta=A_ , gamma=A_ )
for ref, pred in zip(A_ , A_ )
]
else:
__UpperCamelCase =[
meteor_score.single_meteor_score(ref , pred , alpha=A_ , beta=A_ , gamma=A_ )
for ref, pred in zip(A_ , A_ )
]
return {"meteor": np.mean(A_ )}
| 708 |
def _UpperCAmelCase ( input_1 : int , input_2 : int ):
return 1 if input_1 == input_2 else 0
def _UpperCAmelCase ( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 682 | 0 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
_A = logging.getLogger(__name__)
_A = tf.data.AUTOTUNE
def _UpperCAmelCase ( ):
__UpperCamelCase =argparse.ArgumentParser(description='Train a masked language model on TPU.' )
parser.add_argument(
'--pretrained_model_config' , type=lowerCAmelCase__ , default='roberta-base' , help='The model config to use. Note that we don\'t copy the model\'s weights, only the config!' , )
parser.add_argument(
'--tokenizer' , type=lowerCAmelCase__ , default='unigram-tokenizer-wikitext' , help='The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model\'s vocab size.' , )
parser.add_argument(
'--per_replica_batch_size' , type=lowerCAmelCase__ , default=8 , help='Batch size per TPU core.' , )
parser.add_argument(
'--no_tpu' , action='store_true' , help='If set, run on CPU and don\'t try to initialize a TPU. Useful for debugging on non-TPU instances.' , )
parser.add_argument(
'--tpu_name' , type=lowerCAmelCase__ , help='Name of TPU resource to initialize. Should be blank on Colab, and \'local\' on TPU VMs.' , default='local' , )
parser.add_argument(
'--tpu_zone' , type=lowerCAmelCase__ , help='Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.' , )
parser.add_argument(
'--gcp_project' , type=lowerCAmelCase__ , help='Google cloud project name. Only used for non-Colab TPU nodes.' )
parser.add_argument(
'--bfloat16' , action='store_true' , help='Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.' , )
parser.add_argument(
'--train_dataset' , type=lowerCAmelCase__ , help='Path to training dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--shuffle_buffer_size' , type=lowerCAmelCase__ , default=2**18 , help='Size of the shuffle buffer (in samples)' , )
parser.add_argument(
'--eval_dataset' , type=lowerCAmelCase__ , help='Path to evaluation dataset to load. If the path begins with `gs://`'
' then the dataset will be loaded from a Google Cloud Storage bucket.' , )
parser.add_argument(
'--num_epochs' , type=lowerCAmelCase__ , default=1 , help='Number of epochs to train for.' , )
parser.add_argument(
'--learning_rate' , type=lowerCAmelCase__ , default=1E-4 , help='Learning rate to use for training.' , )
parser.add_argument(
'--weight_decay_rate' , type=lowerCAmelCase__ , default=1E-3 , help='Weight decay rate to use for training.' , )
parser.add_argument(
'--max_length' , type=lowerCAmelCase__ , default=5_12 , help='Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py' , )
parser.add_argument(
'--mlm_probability' , type=lowerCAmelCase__ , default=0.15 , help='Fraction of tokens to mask during training.' , )
parser.add_argument('--output_dir' , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='Path to save model checkpoints to.' )
parser.add_argument('--hub_model_id' , type=lowerCAmelCase__ , help='Model ID to upload to on the Hugging Face Hub.' )
__UpperCamelCase =parser.parse_args()
return args
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] ):
try:
if args.tpu_name:
__UpperCamelCase =tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
__UpperCamelCase =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
'Couldn\'t connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or '
'--gcp_project. When running on a TPU VM, use --tpu_name local.' )
tf.config.experimental_connect_to_cluster(lowerCAmelCase__ )
tf.tpu.experimental.initialize_tpu_system(lowerCAmelCase__ )
return tpu
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase =0
for file in file_list:
__UpperCamelCase =file.split('/' )[-1]
__UpperCamelCase =re.search(r'-\d+-(\d+)\.tfrecord' , filename ).group(1 )
__UpperCamelCase =int(lowerCAmelCase__ )
num_samples += sample_count
return num_samples
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict , SCREAMING_SNAKE_CASE__ : List[str]=None ):
__UpperCamelCase =count_samples(lowerCAmelCase__ )
__UpperCamelCase =tf.data.Dataset.from_tensor_slices(lowerCAmelCase__ )
if shuffle:
__UpperCamelCase =dataset.shuffle(len(lowerCAmelCase__ ) )
__UpperCamelCase =tf.data.TFRecordDataset(lowerCAmelCase__ , num_parallel_reads=lowerCAmelCase__ )
# TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
__UpperCamelCase =dataset.apply(tf.data.experimental.assert_cardinality(lowerCAmelCase__ ) )
__UpperCamelCase =dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
if shuffle:
assert shuffle_buffer_size is not None
__UpperCamelCase =dataset.shuffle(args.shuffle_buffer_size )
__UpperCamelCase =dataset.batch(lowerCAmelCase__ , drop_remainder=lowerCAmelCase__ )
__UpperCamelCase =dataset.map(lowerCAmelCase__ , num_parallel_calls=lowerCAmelCase__ )
__UpperCamelCase =dataset.prefetch(lowerCAmelCase__ )
return dataset
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] ):
if not args.no_tpu:
__UpperCamelCase =initialize_tpu(lowerCAmelCase__ )
__UpperCamelCase =tf.distribute.TPUStrategy(lowerCAmelCase__ )
else:
__UpperCamelCase =tf.distribute.OneDeviceStrategy(device='/gpu:0' )
if args.bfloat16:
tf.keras.mixed_precision.set_global_policy('mixed_bfloat16' )
__UpperCamelCase =AutoTokenizer.from_pretrained(args.tokenizer )
__UpperCamelCase =AutoConfig.from_pretrained(args.pretrained_model_config )
__UpperCamelCase =tokenizer.vocab_size
__UpperCamelCase =tf.io.gfile.glob(os.path.join(args.train_dataset , '*.tfrecord' ) )
if not training_records:
raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
__UpperCamelCase =tf.io.gfile.glob(os.path.join(args.eval_dataset , '*.tfrecord' ) )
if not eval_records:
raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
__UpperCamelCase =count_samples(lowerCAmelCase__ )
__UpperCamelCase =num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
__UpperCamelCase =steps_per_epoch * args.num_epochs
with strategy.scope():
__UpperCamelCase =TFAutoModelForMaskedLM.from_config(lowerCAmelCase__ )
model(model.dummy_inputs ) # Pass some dummy inputs through the model to ensure all the weights are built
__UpperCamelCase =create_optimizer(
num_train_steps=lowerCAmelCase__ , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
# Transformers models compute the right loss for their task by default when labels are passed, and will
# use this for training unless you specify your own loss function in compile().
model.compile(optimizer=lowerCAmelCase__ , metrics=['accuracy'] )
def decode_fn(SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ={
'input_ids': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
'attention_mask': tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
}
return tf.io.parse_single_example(lowerCAmelCase__ , lowerCAmelCase__ )
# Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
# use their methods in our data pipeline.
__UpperCamelCase =DataCollatorForLanguageModeling(
tokenizer=lowerCAmelCase__ , mlm_probability=args.mlm_probability , mlm=lowerCAmelCase__ , return_tensors='tf' )
def mask_with_collator(SCREAMING_SNAKE_CASE__ : Optional[Any] ):
# TF really needs an isin() function
__UpperCamelCase =(
~tf.cast(batch['attention_mask'] , tf.bool )
| (batch['input_ids'] == tokenizer.cls_token_id)
| (batch['input_ids'] == tokenizer.sep_token_id)
)
__UpperCamelCase =data_collator.tf_mask_tokens(
batch['input_ids'] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
return batch
__UpperCamelCase =args.per_replica_batch_size * strategy.num_replicas_in_sync
__UpperCamelCase =prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , shuffle_buffer_size=args.shuffle_buffer_size , )
__UpperCamelCase =prepare_dataset(
lowerCAmelCase__ , decode_fn=lowerCAmelCase__ , mask_fn=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , shuffle=lowerCAmelCase__ , )
__UpperCamelCase =[]
if args.hub_model_id:
callbacks.append(
PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=lowerCAmelCase__ ) )
model.fit(
lowerCAmelCase__ , validation_data=lowerCAmelCase__ , epochs=args.num_epochs , callbacks=lowerCAmelCase__ , )
model.save_pretrained(args.output_dir )
if __name__ == "__main__":
_A = parse_args()
main(args)
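# Round-trip sketch of what decode_fn above parses: sequences are stored as
# fixed-length int64 features in TFRecords. Feature names mirror the script's;
# the example values and sequence length are made up.
import tensorflow as tf

seq_len = 8
example = tf.train.Example(features=tf.train.Features(feature={
    "input_ids": tf.train.Feature(int64_list=tf.train.Int64List(value=[101] + [0] * (seq_len - 1))),
    "attention_mask": tf.train.Feature(int64_list=tf.train.Int64List(value=[1] * seq_len)),
}))
features = {
    "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(seq_len,)),
    "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(seq_len,)),
}
parsed = tf.io.parse_single_example(example.SerializeToString(), features)
print(parsed["input_ids"].numpy())  # [101 0 0 0 0 0 0 0]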
| 709 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int = 0 , SCREAMING_SNAKE_CASE__ : int = 0 ):
__UpperCamelCase =right or len(SCREAMING_SNAKE_CASE__ ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 682 | 0 |
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase__ ( BertTokenizationTest ):
"""simple docstring"""
UpperCAmelCase__ : Tuple = DistilBertTokenizer
UpperCAmelCase__ : Any = DistilBertTokenizerFast
UpperCAmelCase__ : Dict = True
@slow
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
__UpperCamelCase =tokenizer.encode('sequence builders' , add_special_tokens=False )
__UpperCamelCase =tokenizer.encode('multi-sequence build' , add_special_tokens=False )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(text )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(text , text_a )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
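# Concrete picture of the layout the assertions above check, with stand-in ids
# (101 and 102 are the [CLS]/[SEP] ids in the uncased BERT vocabulary that
# DistilBERT reuses; the content token ids are invented):
CLS, SEP = 101, 102
text, text_a = [7592], [2088]
assert [CLS] + text + [SEP] == [101, 7592, 102]                                # single sequence
assert [CLS] + text + [SEP] + text_a + [SEP] == [101, 7592, 102, 2088, 102]   # sequence pair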
| 710 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=3 , A_=18 , A_=30 , A_=400 , A_=True , A_=None , A_=True , ) -> List[Any]:
__UpperCamelCase =size if size is not None else {'height': 18, 'width': 18}
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =num_channels
__UpperCamelCase =image_size
__UpperCamelCase =min_resolution
__UpperCamelCase =max_resolution
__UpperCamelCase =do_resize
__UpperCamelCase =size
__UpperCamelCase =apply_ocr
def _a ( self ) -> Tuple:
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =LayoutLMvaImageProcessingTester(self )
@property
def _a ( self ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , 'do_resize' ) )
self.assertTrue(hasattr(A_ , 'size' ) )
self.assertTrue(hasattr(A_ , 'apply_ocr' ) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 18, 'width': 18} )
__UpperCamelCase =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {'height': 42, 'width': 42} )
def _a ( self ) -> Dict:
pass
def _a ( self ) -> Optional[Any]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
self.assertIsInstance(encoding.words , A_ )
self.assertIsInstance(encoding.boxes , A_ )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> int:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> List[str]:
# Initialize image_processing
__UpperCamelCase =self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__UpperCamelCase =prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
__UpperCamelCase =image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
# Test batched
__UpperCamelCase =image_processing(A_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['height'],
self.image_processor_tester.size['width'],
) , )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=False )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
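# Minimal usage sketch of the processor exercised above (assumes transformers,
# pytesseract and the Tesseract binary are installed; the drawn text and any
# OCR output are illustrative only):
from PIL import Image, ImageDraw
from transformers import LayoutLMv3ImageProcessor

image = Image.new("RGB", (224, 224), "white")
ImageDraw.Draw(image).text((10, 10), "hello world", fill="black")
encoding = LayoutLMv3ImageProcessor()(image, return_tensors="pt")  # apply_ocr=True by default
print(encoding.pixel_values.shape, encoding.words, encoding.boxes)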
| 682 | 0 |
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
_A = logging.getLogger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : list ):
__UpperCamelCase ='\n'.join(SCREAMING_SNAKE_CASE__ )
Path(SCREAMING_SNAKE_CASE__ ).open('w' ).writelines(SCREAMING_SNAKE_CASE__ )
_A = 'patrickvonplaten/t5-tiny-random'
_A = 'sshleifer/bart-tiny-random'
_A = 'sshleifer/tiny-mbart'
_A = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class UpperCAmelCase__ ( TestCasePlus ):
"""simple docstring"""
def _a ( self , A_ ) -> int:
__UpperCamelCase =Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
__UpperCamelCase =input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
__UpperCamelCase =[' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(input_file_name , articles )
__UpperCamelCase =str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
__UpperCamelCase ='translation_en_to_de' if model == T5_TINY else 'summarization'
__UpperCamelCase =f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(sys , 'argv' , testargs ):
run_generate()
assert Path(output_file_name ).exists()
# os.remove(Path(output_file_name))
def _a ( self ) -> List[Any]:
self.run_eval_tester(T5_TINY )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def _a ( self , A_ ) -> Dict:
self.run_eval_tester(model )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def _a ( self , A_ ) -> str:
__UpperCamelCase =Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
__UpperCamelCase =input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
__UpperCamelCase ={
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
__UpperCamelCase =Path(self.get_auto_remove_tmp_dir() )
__UpperCamelCase =str(tmp_dir / 'scores.json' )
__UpperCamelCase =str(tmp_dir / 'val.target' )
_dump_articles(input_file_name , text['en'] )
_dump_articles(reference_path , text['de'] )
__UpperCamelCase ='translation_en_to_de' if model == T5_TINY else 'summarization'
__UpperCamelCase =f'\n run_eval_search.py\n {model}\n {str(UpperCamelCase__ )}\n {str(UpperCamelCase__ )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(sys , 'argv' , testargs ):
with CaptureStdout() as cs:
run_search()
__UpperCamelCase =[' num_beams | length_penalty', model, 'Best score args']
__UpperCamelCase =['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(ROUGE_KEYS )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(score_path ).exists()
os.remove(Path(score_path ) )
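# The pattern these tests rely on: run a script's CLI entry point with
# fabricated arguments by patching sys.argv (standalone toy version):
import sys
from unittest.mock import patch

def toy_main():
    print(sys.argv[1:])

with patch.object(sys, "argv", ["prog", "--num_beams", "2"]):
    toy_main()  # prints ['--num_beams', '2']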
| 711 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
_A = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : bool = field(default=A_ , metadata={"help": "Whether to use SortishSampler or not."} )
UpperCAmelCase__ : bool = field(
default=A_ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[int] = field(
default=A_ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
UpperCAmelCase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=A_ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def _a ( self ) -> Dict:
__UpperCamelCase =super().to_dict()
for k, v in d.items():
if isinstance(A_ , A_ ):
__UpperCamelCase =v.to_dict()
return d
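# Usage sketch (assumes transformers with torch installed): the generation
# knobs this subclass adds, next to ordinary TrainingArguments fields.
from transformers import Seq2SeqTrainingArguments

seq2seq_args = Seq2SeqTrainingArguments(
    output_dir="out",
    predict_with_generate=True,
    generation_max_length=128,
    generation_num_beams=4,
)
print(seq2seq_args.to_dict()["generation_max_length"])  # 128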
| 682 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ):
return (data["data"], data["target"])
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Dict ):
__UpperCamelCase =XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(__lowerCAmelCase , __lowerCAmelCase )
# Predict target for test data
__UpperCamelCase =xgb.predict(__lowerCAmelCase )
__UpperCamelCase =predictions.reshape(len(__lowerCAmelCase ) , 1 )
return predictions
def _UpperCAmelCase ( ):
__UpperCamelCase =fetch_california_housing()
__UpperCamelCase , __UpperCamelCase =data_handling(__lowerCAmelCase )
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =train_test_split(
__lowerCAmelCase , __lowerCAmelCase , test_size=0.25 , random_state=1 )
__UpperCamelCase =xgboost(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# Error printing
print(F'Mean Absolute Error : {mean_absolute_error(__lowerCAmelCase , __lowerCAmelCase )}' )
print(F'Mean Square Error : {mean_squared_error(__lowerCAmelCase , __lowerCAmelCase )}' )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 712 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A = logging.get_logger(__name__)
_A = {
'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json',
'Salesforce/blip-vqa-capfit-large': (
'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-base': (
'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'
),
'Salesforce/blip-image-captioning-large': (
'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'
),
'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json',
'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json',
'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json',
'Salesforce/blip-itm-large-flikr': (
'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'
),
}
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Dict = "blip_text_model"
def __init__( self , A_=30524 , A_=768 , A_=768 , A_=3072 , A_=768 , A_=12 , A_=8 , A_=512 , A_="gelu" , A_=1E-12 , A_=0.0 , A_=0.0 , A_=0.02 , A_=30522 , A_=2 , A_=0 , A_=102 , A_=True , A_=True , **A_ , ) -> Optional[int]:
super().__init__(
pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , sep_token_id=A_ , **A_ , )
__UpperCamelCase =vocab_size
__UpperCamelCase =hidden_size
__UpperCamelCase =encoder_hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =max_position_embeddings
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =is_decoder
__UpperCamelCase =use_cache
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "blip_vision_model"
def __init__( self , A_=768 , A_=3072 , A_=512 , A_=12 , A_=12 , A_=384 , A_=16 , A_="gelu" , A_=1E-5 , A_=0.0 , A_=1E-10 , **A_ , ) -> Optional[Any]:
super().__init__(**A_ )
__UpperCamelCase =hidden_size
__UpperCamelCase =intermediate_size
__UpperCamelCase =projection_dim
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =patch_size
__UpperCamelCase =image_size
__UpperCamelCase =initializer_range
__UpperCamelCase =attention_dropout
__UpperCamelCase =layer_norm_eps
__UpperCamelCase =hidden_act
@classmethod
def _a ( cls , A_ , **A_ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(A_ )
__UpperCamelCase , __UpperCamelCase =cls.get_config_dict(A_ , **A_ )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__UpperCamelCase =config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A_ , **A_ )
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
UpperCAmelCase__ : int = "blip"
UpperCAmelCase__ : Optional[int] = True
def __init__( self , A_=None , A_=None , A_=512 , A_=2.6592 , A_=256 , **A_ , ) -> Union[str, Any]:
super().__init__(**A_ )
if text_config is None:
__UpperCamelCase ={}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__UpperCamelCase ={}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__UpperCamelCase =BlipTextConfig(**A_ )
__UpperCamelCase =BlipVisionConfig(**A_ )
__UpperCamelCase =self.vision_config.hidden_size
__UpperCamelCase =projection_dim
__UpperCamelCase =logit_scale_init_value
__UpperCamelCase =1.0
__UpperCamelCase =0.02
__UpperCamelCase =image_text_hidden_size
@classmethod
def _a ( cls , A_ , A_ , **A_ ) -> str:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =copy.deepcopy(self.__dict__ )
__UpperCamelCase =self.text_config.to_dict()
__UpperCamelCase =self.vision_config.to_dict()
__UpperCamelCase =self.__class__.model_type
return output
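# Composition sketch mirroring from_text_vision_configs above (assumes
# transformers is installed): a BlipConfig nests one text and one vision config
# and copies the vision hidden size into the text config's encoder_hidden_size.
from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

blip_config = BlipConfig.from_text_vision_configs(
    BlipTextConfig(hidden_size=256), BlipVisionConfig(hidden_size=256)
)
print(blip_config.text_config.encoder_hidden_size)  # 256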
| 682 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' )
__UpperCamelCase ={
'input_ids': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.int32 ), # "My dog is cute"
'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
}
__UpperCamelCase =model(_UpperCAmelCase )['last_hidden_state']
__UpperCamelCase =tf.TensorShape((1, 6, 768) )
self.assertEqual(output.shape , _UpperCAmelCase )
# compare the actual values for a slice.
__UpperCamelCase =tf.convert_to_tensor(
[
[
[0.068_1762, 0.1089_4451, 0.0677_2504],
[-0.0642_3668, 0.0236_6615, 0.0432_9344],
[-0.0605_7295, 0.0997_4135, -0.0007_0584],
]
] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 713 |
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = RoCBertTokenizer
UpperCAmelCase__ : Optional[Any] = None
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = True
UpperCAmelCase__ : int = filter_non_english
def _a ( self ) -> Optional[Any]:
super().setUp()
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__UpperCamelCase ={}
__UpperCamelCase ={}
for i, value in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =i
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__UpperCamelCase =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(A_ , A_ , ensure_ascii=A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(A_ , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(A_ ) , [5, 6, 2, 5, 7, 8] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def _a ( self ) -> List[Any]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> str:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _a ( self ) -> Any:
__UpperCamelCase =RoCBertBasicTokenizer(do_lower_case=A_ , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__UpperCamelCase ={}
for i, token in enumerate(A_ ):
__UpperCamelCase =i
__UpperCamelCase =RoCBertWordpieceTokenizer(vocab=A_ , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def _a ( self ) -> Dict:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _a ( self ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _a ( self ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__UpperCamelCase =self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(A_ ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def _a ( self ) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =f'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
__UpperCamelCase =tokenizer_r.encode_plus(
A_ , return_attention_mask=A_ , return_token_type_ids=A_ , return_offsets_mapping=A_ , add_special_tokens=A_ , )
__UpperCamelCase =tokenizer_r.do_lower_case if hasattr(A_ , 'do_lower_case' ) else False
__UpperCamelCase =(
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def _a ( self ) -> List[str]:
__UpperCamelCase =['的', '人', '有']
__UpperCamelCase =''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase =True
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
__UpperCamelCase =False
__UpperCamelCase =self.rust_tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =self.tokenizer_class.from_pretrained(A_ , **A_ )
__UpperCamelCase =tokenizer_r.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_p.encode(A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer_r.convert_ids_to_tokens(A_ )
__UpperCamelCase =tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
__UpperCamelCase =[
f'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ , A_ )
self.assertListEqual(A_ , A_ )
@slow
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__UpperCamelCase =tokenizer.encode('你好' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode('你是谁' , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ )
__UpperCamelCase =tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(f'{tokenizer.__class__.__name__}' ):
__UpperCamelCase ='你好,你是谁'
__UpperCamelCase =tokenizer.tokenize(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_shape_ids(A_ )
__UpperCamelCase =tokenizer.convert_tokens_to_pronunciation_ids(A_ )
__UpperCamelCase =tokenizer.prepare_for_model(
A_ , A_ , A_ , add_special_tokens=A_ )
__UpperCamelCase =tokenizer.encode_plus(A_ , add_special_tokens=A_ )
self.assertEqual(A_ , A_ )
| 682 | 0 |
from __future__ import annotations
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_=None ) -> Tuple:
__UpperCamelCase =data
__UpperCamelCase =None
def __repr__( self ) -> Union[str, Any]:
__UpperCamelCase =[]
__UpperCamelCase =self
while temp:
string_rep.append(f'{temp.data}' )
__UpperCamelCase =temp.next
return "->".join(_A )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
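    # Build a singly linked list from a Python list; returns the head node.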
if not elements_list:
raise Exception('The Elements List is empty' )
__UpperCamelCase =__UpperCamelCase =Node(elements_list[0] )
for i in range(1 , len(UpperCAmelCase__ ) ):
__UpperCamelCase =Node(elements_list[i] )
__UpperCamelCase =current.next
return head
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
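    # Recurse to the tail first, then print each value on the way back up.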
if head_node is not None and isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
print_reverse(head_node.next )
print(head_node.data )
def _UpperCAmelCase ( ):
from doctest import testmod
testmod()
__UpperCamelCase =make_linked_list([14, 52, 14, 12, 43] )
print('Linked List:' )
print(UpperCAmelCase__ )
print('Elements in Reverse:' )
print_reverse(UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 714 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
_A = random.Random()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : List[str]=1.0 , SCREAMING_SNAKE_CASE__ : Dict=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ):
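    # Produce a nested list of random floats with the given (batch, length) shape,
    # used below as fake raw-audio input for the feature extractor.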
if rng is None:
__UpperCamelCase =global_rng
__UpperCamelCase =[]
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , A_ , A_=7 , A_=400 , A_=2000 , A_=10 , A_=160 , A_=8 , A_=0.0 , A_=4000 , A_=False , A_=True , ) -> Optional[Any]:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =min_seq_length
__UpperCamelCase =max_seq_length
__UpperCamelCase =(self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCamelCase =padding_value
__UpperCamelCase =sampling_rate
__UpperCamelCase =return_attention_mask
__UpperCamelCase =do_normalize
__UpperCamelCase =feature_size
__UpperCamelCase =chunk_length
__UpperCamelCase =hop_length
def _a ( self ) -> int:
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _a ( self , A_=False , A_=False ) -> Any:
def _flatten(A_ ):
return list(itertools.chain(*A_ ) )
if equal_length:
__UpperCamelCase =[floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__UpperCamelCase =[
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__UpperCamelCase =[np.asarray(A_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase__ ( A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = WhisperFeatureExtractor if is_speech_available() else None
def _a ( self ) -> Optional[int]:
__UpperCamelCase =WhisperFeatureExtractionTester(self )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =feat_extract_first.save_pretrained(A_ )[0]
check_json_file_has_correct_format(A_ )
__UpperCamelCase =self.feature_extraction_class.from_pretrained(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCamelCase =os.path.join(A_ , 'feat_extract.json' )
feat_extract_first.to_json_file(A_ )
__UpperCamelCase =self.feature_extraction_class.from_json_file(A_ )
__UpperCamelCase =feat_extract_first.to_dict()
__UpperCamelCase =feat_extract_second.to_dict()
__UpperCamelCase =feat_extract_first.mel_filters
__UpperCamelCase =feat_extract_second.mel_filters
self.assertTrue(np.allclose(A_ , A_ ) )
self.assertEqual(A_ , A_ )
def _a ( self ) -> Tuple:
        # Tests that all calls wrap to encode_plus and batch_encode_plus
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
# Test feature size
__UpperCamelCase =feature_extractor(A_ , padding='max_length' , return_tensors='np' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
# Test not batched input
__UpperCamelCase =feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test batched
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__UpperCamelCase =[floats_list((1, x) )[0] for x in (800, 800, 800)]
__UpperCamelCase =np.asarray(A_ )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
# Test truncation required
__UpperCamelCase =[floats_list((1, x) )[0] for x in range(200 , (feature_extractor.n_samples + 500) , 200 )]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs]
__UpperCamelCase =[x[: feature_extractor.n_samples] for x in speech_inputs]
__UpperCamelCase =[np.asarray(A_ ) for speech_input in speech_inputs_truncated]
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(A_ , A_ ):
self.assertTrue(np.allclose(A_ , A_ , atol=1E-3 ) )
def _a ( self ) -> Dict:
import torch
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =np.random.rand(100 , 32 ).astype(np.floataa )
__UpperCamelCase =np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__UpperCamelCase =feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def _a ( self , A_ ) -> Optional[int]:
__UpperCamelCase =load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
__UpperCamelCase =ds.sort('id' ).select(range(A_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _a ( self ) -> Optional[int]:
# fmt: off
__UpperCamelCase =torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
__UpperCamelCase =self._load_datasamples(1 )
__UpperCamelCase =WhisperFeatureExtractor()
__UpperCamelCase =feature_extractor(A_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , A_ , atol=1E-4 ) )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__UpperCamelCase =self._load_datasamples(1 )[0]
__UpperCamelCase =((audio - audio.min()) / (audio.max() - audio.min())) * 65535 # Rescale to [0, 65535] to show issue
__UpperCamelCase =feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=A_ )[0]
self.assertTrue(np.all(np.mean(A_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(A_ ) - 1 ) < 1E-3 ) )
| 682 | 0 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_A = '\\n Text data.\n Second line of data.'
_A = 'file'
@pytest.fixture(scope='session' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[str]:
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / (FILE_PATH + '.zstd')
__UpperCamelCase =bytes(UpperCAmelCase__ , 'utf-8' )
with zstd.open(UpperCAmelCase__ , 'wb' ) as f:
f.write(UpperCAmelCase__ )
return path
@pytest.fixture
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
with open(os.path.join(tmpfs.local_root_dir , UpperCAmelCase__ ) , 'w' ) as f:
f.write(UpperCAmelCase__ )
return FILE_PATH
@pytest.mark.parametrize('compression_format' , ['gzip', 'xz', 'zstd'] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> List[Any]:
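    # cached_path should transparently decompress each supported archive format
    # into the cache directory and return the extracted file's path.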
__UpperCamelCase ={'gzip': gz_file, 'xz': xz_file, 'zstd': zstd_path}
__UpperCamelCase =input_paths[compression_format]
__UpperCamelCase =tmp_path / 'cache'
__UpperCamelCase =DownloadConfig(cache_dir=UpperCAmelCase__ , extract_compressed_file=UpperCAmelCase__ )
__UpperCamelCase =cached_path(UpperCAmelCase__ , download_config=UpperCAmelCase__ )
with open(UpperCAmelCase__ ) as f:
__UpperCamelCase =f.read()
with open(UpperCAmelCase__ ) as f:
__UpperCamelCase =f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('default_extracted' , [True, False] )
@pytest.mark.parametrize('default_cache_dir' , [True, False] )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int ) -> Union[str, Any]:
__UpperCamelCase ='custom_cache'
__UpperCamelCase ='custom_extracted_dir'
__UpperCamelCase =tmp_path / 'custom_extracted_path'
if default_extracted:
__UpperCamelCase =('downloads' if default_cache_dir else custom_cache_dir, 'extracted')
else:
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_DIR' , UpperCAmelCase__ )
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(UpperCAmelCase__ ) )
__UpperCamelCase =custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
__UpperCamelCase =xz_file
__UpperCamelCase =(
DownloadConfig(extract_compressed_file=UpperCAmelCase__ )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=UpperCAmelCase__ )
)
__UpperCamelCase =cached_path(UpperCAmelCase__ , download_config=UpperCAmelCase__ )
assert Path(UpperCAmelCase__ ).parent.parts[-2:] == expected
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : str ) -> List[Any]:
# absolute path
__UpperCamelCase =str(Path(UpperCAmelCase__ ).resolve() )
assert cached_path(UpperCAmelCase__ ) == text_file
# relative path
__UpperCamelCase =str(Path(UpperCAmelCase__ ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(UpperCAmelCase__ ) == text_file
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple ) -> int:
# absolute path
__UpperCamelCase =str(tmp_path.resolve() / '__missing_file__.txt' )
with pytest.raises(UpperCAmelCase__ ):
cached_path(UpperCAmelCase__ )
# relative path
__UpperCamelCase ='./__missing_file__.txt'
with pytest.raises(UpperCAmelCase__ ):
cached_path(UpperCAmelCase__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> List[Any]:
__UpperCamelCase =get_from_cache(F'tmp://{tmpfs_file}' )
with open(UpperCAmelCase__ ) as f:
__UpperCamelCase =f.read()
assert output_file_content == FILE_CONTENT
@patch('datasets.config.HF_DATASETS_OFFLINE' , UpperCAmelCase__ )
def _UpperCAmelCase ( ) -> List[str]:
with pytest.raises(UpperCAmelCase__ ):
cached_path('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , UpperCAmelCase__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ) -> int:
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(UpperCAmelCase__ ):
http_get('https://huggingface.co' , temp_file=UpperCAmelCase__ )
with pytest.raises(UpperCAmelCase__ ):
http_head('https://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , UpperCAmelCase__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Any:
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(UpperCAmelCase__ ):
ftp_get('ftp://huggingface.co' , temp_file=UpperCAmelCase__ )
with pytest.raises(UpperCAmelCase__ ):
ftp_head('ftp://huggingface.co' )
@patch('datasets.config.HF_DATASETS_OFFLINE' , UpperCAmelCase__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
__UpperCamelCase =tmp_path_factory.mktemp('data' ) / 'file.html'
with pytest.raises(UpperCAmelCase__ ):
fsspec_get('s3://huggingface.co' , temp_file=UpperCAmelCase__ )
with pytest.raises(UpperCAmelCase__ ):
fsspec_head('s3://huggingface.co' )
| 715 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , ) -> List[str]:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =False
__UpperCamelCase =2
__UpperCamelCase =99
__UpperCamelCase =0
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase ='last'
__UpperCamelCase =True
__UpperCamelCase =None
__UpperCamelCase =0
def _a ( self ) -> List[Any]:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
__UpperCamelCase =None
if self.use_input_lengths:
__UpperCamelCase =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Any:
__UpperCamelCase =TFFlaubertModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertWithLMHeadModel(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[Any]:
__UpperCamelCase =TFFlaubertForQuestionAnsweringSimple(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =TFFlaubertForSequenceClassification(A_ )
__UpperCamelCase ={'input_ids': input_ids, 'lengths': input_lengths}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFFlaubertForTokenClassification(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFFlaubertForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
        (
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
            __UpperCamelCase ,
        ) =config_and_inputs
__UpperCamelCase ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'langs': token_type_ids,
'lengths': input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : Optional[int] = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
UpperCAmelCase__ : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : Optional[int] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> List[str]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _a ( self ) -> Dict:
__UpperCamelCase =TFFlaubertModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , emb_dim=37 )
def _a ( self ) -> Dict:
self.config_tester.run_common_tests()
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A_ )
def _a ( self ) -> Any:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A_ )
@slow
def _a ( self ) -> Optional[int]:
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =TFFlaubertModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> int:
__UpperCamelCase =TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased' )
__UpperCamelCase =tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
__UpperCamelCase =model(A_ )[0]
__UpperCamelCase =tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , A_ )
# compare the actual values for a slice.
__UpperCamelCase =tf.convert_to_tensor(
[
[
[-1.876_8773, -1.56_6555, 0.2707_2418],
[-1.692_0038, -0.587_3505, 1.932_9599],
[-2.956_3985, -1.699_3835, 1.797_2052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 682 | 0 |
'''simple docstring'''
import os
import sys
import unittest
_A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
_A = os.path.join(git_repo_path, 'src', 'transformers')
_A = '\n{0} = None\n'
_A = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n'
_A = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n'
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> str:
__UpperCamelCase =find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(A_ )
__UpperCamelCase =find_backend(' if not is_tokenizers_available():' )
self.assertEqual(A_ , 'tokenizers' )
__UpperCamelCase =find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(A_ , 'tensorflow_text' )
__UpperCamelCase =find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(A_ , 'sentencepiece_and_tokenizers' )
__UpperCamelCase =find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(A_ , 'sentencepiece_and_tensorflow_text' )
__UpperCamelCase =find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(A_ , 'sentencepiece_and_tokenizers_and_vision' )
def _a ( self ) -> List[Any]:
__UpperCamelCase =read_init()
        # We don't assert on the exact list of keys to allow for smooth growth of backend-specific objects
self.assertIn('torch' , A_ )
self.assertIn('tensorflow_text' , A_ )
self.assertIn('sentencepiece_and_tokenizers' , A_ )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(A_ , '\nCONSTANT = None\n' )
__UpperCamelCase =create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
A_ , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
__UpperCamelCase ='\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
__UpperCamelCase =create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(A_ , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase ='# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
__UpperCamelCase =create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , A_ )
| 716 |
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[Any] ):
# ===== initialization =====
__UpperCamelCase =Mock()
__UpperCamelCase =conn, Mock()
__UpperCamelCase =iter([1, None] )
__UpperCamelCase =lambda SCREAMING_SNAKE_CASE__ : next(SCREAMING_SNAKE_CASE__ )
# ===== invoke =====
send_file(filename='mytext.txt' , testing=SCREAMING_SNAKE_CASE__ )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 682 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
_A = """https://openaipublic.azureedge.net/jukebox/models/"""
_A = {
"""jukebox-1b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""1b_lyrics/prior_level_2.pth.tar""",
],
"""jukebox-5b-lyrics""": [
"""5b/vqvae.pth.tar""",
"""5b/prior_level_0.pth.tar""",
"""5b/prior_level_1.pth.tar""",
"""5b_lyrics/prior_level_2.pth.tar""",
],
}
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Dict ):
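    # Map one OpenAI Jukebox state-dict key onto the Hugging Face module layout.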
if key.endswith('.model.1.bias' ) and len(key.split('.' ) ) > 10:
__UpperCamelCase =key.replace('.model.1.bias' , '.conv1d_1.bias' )
elif key.endswith('.model.1.weight' ) and len(key.split('.' ) ) > 10:
__UpperCamelCase =key.replace('.model.1.weight' , '.conv1d_1.weight' )
elif key.endswith('.model.3.bias' ) and len(key.split('.' ) ) > 10:
__UpperCamelCase =key.replace('.model.3.bias' , '.conv1d_2.bias' )
elif key.endswith('.model.3.weight' ) and len(key.split('.' ) ) > 10:
__UpperCamelCase =key.replace('.model.3.weight' , '.conv1d_2.weight' )
if "conditioner_blocks.0." in key:
__UpperCamelCase =key.replace('conditioner_blocks.0' , 'conditioner_blocks' )
if "prime_prior" in key:
__UpperCamelCase =key.replace('prime_prior' , 'encoder' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__UpperCamelCase =key.replace('.emb.' , '.' )
if key.endswith('k' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('.k' , '.codebook' )
if "y_emb." in key:
return key.replace('y_emb.' , 'metadata_embedding.' )
if "x_emb.emb." in key:
__UpperCamelCase =key.replace('0.x_emb.emb' , 'embed_tokens' )
if "prime_state_ln" in key:
return key.replace('prime_state_ln' , 'encoder.final_layer_norm' )
if ".ln" in key:
return key.replace('.ln' , '.layer_norm' )
if "_ln" in key:
return key.replace('_ln' , '_layer_norm' )
if "prime_state_proj" in key:
return key.replace('prime_state_proj' , 'encoder.proj_in' )
if "prime_x_out" in key:
return key.replace('prime_x_out' , 'encoder.lm_head' )
if "prior.x_out" in key:
return key.replace('x_out' , 'fc_proj_out' )
if "x_emb" in key:
return key.replace('x_emb' , 'embed_tokens' )
return key
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ):
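    # Rewrite every original key and sanity-check it against the target model's state dict.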
__UpperCamelCase ={}
import re
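    # Regexes covering the original encoder/decoder/conditioner naming schemes;
    # each match below is rewritten to the corresponding Hugging Face module path.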
__UpperCamelCase =re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
__UpperCamelCase =re.compile(
r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__UpperCamelCase =re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
__UpperCamelCase =re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)' )
__UpperCamelCase =re.compile(
r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__UpperCamelCase =re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)' )
__UpperCamelCase =re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)' )
__UpperCamelCase =re.compile(
r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)' )
__UpperCamelCase =re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =re_encoder_block_conv_in.match(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =regex_match.groups()
__UpperCamelCase =int(groups[2] ) * 2 + int(groups[3] )
__UpperCamelCase =F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
__UpperCamelCase =re_encoder_block_conv_in.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif re_encoder_block_resnet.fullmatch(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =re_encoder_block_resnet.match(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =regex_match.groups()
__UpperCamelCase =int(groups[2] ) * 2 + int(groups[3] )
__UpperCamelCase ={'1': 1, '3': 2}[groups[-2]]
__UpperCamelCase =F'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
__UpperCamelCase =F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__UpperCamelCase =prefix + resnet_block
__UpperCamelCase =re_encoder_block_resnet.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif re_encoder_block_proj_out.fullmatch(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =re_encoder_block_proj_out.match(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =regex_match.groups()
__UpperCamelCase =F'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
__UpperCamelCase =re_encoder_block_proj_out.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =re_decoder_block_conv_out.match(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =regex_match.groups()
__UpperCamelCase =int(groups[2] ) * 2 + int(groups[3] ) - 2
__UpperCamelCase =F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
__UpperCamelCase =re_decoder_block_conv_out.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif re_decoder_block_resnet.fullmatch(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =re_decoder_block_resnet.match(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =regex_match.groups()
__UpperCamelCase =int(groups[2] ) * 2 + int(groups[3] ) - 2
__UpperCamelCase ={'1': 1, '3': 2}[groups[-2]]
__UpperCamelCase =F'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
__UpperCamelCase =F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__UpperCamelCase =prefix + resnet_block
__UpperCamelCase =re_decoder_block_resnet.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif re_decoder_block_proj_in.fullmatch(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =re_decoder_block_proj_in.match(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =regex_match.groups()
__UpperCamelCase =F'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
__UpperCamelCase =re_decoder_block_proj_in.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =re_prior_cond_conv_out.match(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =regex_match.groups()
__UpperCamelCase =int(groups[1] ) * 2 + int(groups[2] ) - 2
__UpperCamelCase =F'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
__UpperCamelCase =re_prior_cond_conv_out.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif re_prior_cond_resnet.fullmatch(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =re_prior_cond_resnet.match(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =regex_match.groups()
__UpperCamelCase =int(groups[1] ) * 2 + int(groups[2] ) - 2
__UpperCamelCase ={'1': 1, '3': 2}[groups[-2]]
__UpperCamelCase =F'conditioner_blocks.upsampler.upsample_block.{block_index}.'
__UpperCamelCase =F'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
__UpperCamelCase =prefix + resnet_block
__UpperCamelCase =re_prior_cond_resnet.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif re_prior_cond_proj_in.fullmatch(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =re_prior_cond_proj_in.match(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =regex_match.groups()
__UpperCamelCase =F'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
__UpperCamelCase =re_prior_cond_proj_in.sub(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# keep original key
else:
__UpperCamelCase =original_key
__UpperCamelCase =replace_key(SCREAMING_SNAKE_CASE__ )
if F'{key_prefix}.{key}' not in model_state_dict or key is None:
print(F'failed converting {original_key} to {key}, does not match' )
        # handle mismatched shape
elif value.shape != model_state_dict[F'{key_prefix}.{key}'].shape:
__UpperCamelCase =model_state_dict[F'{key_prefix}.{key}']
            print(F'{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match' )
__UpperCamelCase =original_key
__UpperCamelCase =original_key
__UpperCamelCase =value
return new_dict
@torch.no_grad()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int=None , SCREAMING_SNAKE_CASE__ : Dict=None ):
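    # Download any missing original checkpoints, then convert the VQ-VAE and the
    # three priors and save the resulting PyTorch model.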
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
__UpperCamelCase =requests.get(F'{PREFIX}{file}' , allow_redirects=SCREAMING_SNAKE_CASE__ )
os.makedirs(F'{pytorch_dump_folder_path}/' , exist_ok=SCREAMING_SNAKE_CASE__ )
open(F'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' , 'wb' ).write(r.content )
__UpperCamelCase =MODEL_MAPPING[model_name.split('/' )[-1]]
__UpperCamelCase =JukeboxConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =JukeboxModel(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =[]
__UpperCamelCase ={}
for i, dict_name in enumerate(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =torch.load(F'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['model']
__UpperCamelCase ={}
for k in old_dic.keys():
if k.endswith('.b' ):
__UpperCamelCase =old_dic[k]
elif k.endswith('.w' ):
__UpperCamelCase =old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__UpperCamelCase =old_dic[k]
else:
__UpperCamelCase =old_dic[k]
__UpperCamelCase ='vqvae' if i == 0 else F'priors.{3 - i}'
__UpperCamelCase =fix_jukebox_keys(SCREAMING_SNAKE_CASE__ , model.state_dict() , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
weight_dict.append(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =weight_dict.pop(0 )
model.vqvae.load_state_dict(SCREAMING_SNAKE_CASE__ )
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
with open(F'{pytorch_dump_folder_path}/mapping.json' , 'w' ) as txtfile:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
return weight_dict
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
_A = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 717 |
import math
from collections.abc import Callable
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Callable[[float], float] , SCREAMING_SNAKE_CASE__ : float , SCREAMING_SNAKE_CASE__ : float ):
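    # Secant method: x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n)),
    # iterated until two successive iterates agree to within 1e-5.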
__UpperCamelCase =xa
__UpperCamelCase =xa
while True:
if x_n == x_na or function(SCREAMING_SNAKE_CASE__ ) == function(SCREAMING_SNAKE_CASE__ ):
raise ZeroDivisionError('float division by zero, could not find root' )
__UpperCamelCase =x_na - (
function(SCREAMING_SNAKE_CASE__ ) / ((function(SCREAMING_SNAKE_CASE__ ) - function(SCREAMING_SNAKE_CASE__ )) / (x_na - x_n))
)
if abs(x_na - x_na ) < 10**-5:
return x_na
__UpperCamelCase =x_na
__UpperCamelCase =x_na
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : float ):
return math.pow(SCREAMING_SNAKE_CASE__ , 3 ) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
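    # Hedged illustration (added for clarity, not part of the original file):
    # the same secant update applied to g(x) = x**2 - 2 converges to sqrt(2).
    # The names g, xa and xb are local to this sketch.
    g = lambda x: x * x - 2
    xa, xb = 1.0, 2.0
    while abs(xb - xa) > 1e-10:
        xa, xb = xb, xb - g(xb) * (xb - xa) / (g(xb) - g(xa))
    print(xb)  # ~ 1.4142135623730951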
| 682 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_A = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
"VanForImageClassification",
"VanModel",
"VanPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 718 |
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_A = logging.getLogger(__name__)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self ) -> int:
__UpperCamelCase =False
def _a ( self , A_ , A_ , A_ , A_ ) -> List[Any]:
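        # Construct the retriever only once per Ray actor; later calls are no-ops.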
if not self.initialized:
__UpperCamelCase =RagRetriever(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =True
def _a ( self ) -> Optional[Any]:
self.retriever.index.init_index()
def _a ( self , A_ , A_ ) -> Dict:
__UpperCamelCase , __UpperCamelCase =self.retriever._main_retrieve(A_ , A_ )
return doc_ids, retrieved_doc_embeds
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def __init__( self , A_ , A_ , A_ , A_ , A_=None ) -> Dict:
if index is not None and index.is_initialized() and len(A_ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , index=A_ , init_retrieval=A_ , )
__UpperCamelCase =retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(A_ , A_ , A_ , A_ )
for worker in self.retrieval_workers
] )
def _a ( self ) -> Union[str, Any]:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def _a ( self , A_ , A_ ) -> Optional[int]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
__UpperCamelCase =self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
__UpperCamelCase , __UpperCamelCase =ray.get(random_worker.retrieve.remote(A_ , A_ ) )
else:
__UpperCamelCase , __UpperCamelCase =self._main_retrieve(A_ , A_ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A_ )
@classmethod
def _a ( cls , A_ , A_=None , **A_ ) -> List[str]:
return super(A_ , cls ).get_tokenizers(A_ , A_ , **A_ )
@classmethod
def _a ( cls , A_ , A_ , A_=None , **A_ ) -> str:
__UpperCamelCase =kwargs.pop('config' , A_ ) or RagConfig.from_pretrained(A_ , **A_ )
__UpperCamelCase =RagTokenizer.from_pretrained(A_ , config=A_ )
__UpperCamelCase =rag_tokenizer.question_encoder
__UpperCamelCase =rag_tokenizer.generator
if indexed_dataset is not None:
__UpperCamelCase ='custom'
__UpperCamelCase =CustomHFIndex(config.retrieval_vector_size , A_ )
else:
__UpperCamelCase =cls._build_index(A_ )
return cls(
A_ , question_encoder_tokenizer=A_ , generator_tokenizer=A_ , retrieval_workers=A_ , index=A_ , )
| 682 | 0 |
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
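    # Iterative Euclidean algorithm: replace (a, b) with (b, a mod b) until b is 0.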
while b:
        __UpperCamelCase , __UpperCamelCase =b, a % b
return a
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
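    # Recursive form of the same algorithm; gcd(a, 0) == a is the base case.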
return a if b == 0 else euclidean_gcd_recursive(snake_case_ , a % b )
def _UpperCAmelCase ( ):
print(F'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' )
print(F'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' )
print(F'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' )
print(F'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' )
print(F'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' )
print(F'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' )
print(F'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' )
print(F'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' )
print(F'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' )
if __name__ == "__main__":
main()
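    # Hedged spot-check (illustration, not part of the original file): the
    # standard library agrees that gcd(48, 18) == 6, matching Euclid's
    # algorithm implemented above.
    import math
    assert math.gcd(48, 18) == 6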
| 719 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=64 , A_=2 , A_=3 , A_=True , A_=True , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=10 , A_=0.02 , A_=[1, 16, 4, 4] , A_=None , ) -> Any:
__UpperCamelCase =parent
__UpperCamelCase =batch_size
__UpperCamelCase =image_size
__UpperCamelCase =patch_size
__UpperCamelCase =num_channels
__UpperCamelCase =is_training
__UpperCamelCase =use_labels
__UpperCamelCase =hidden_size
__UpperCamelCase =num_hidden_layers
__UpperCamelCase =num_attention_heads
__UpperCamelCase =intermediate_size
__UpperCamelCase =hidden_act
__UpperCamelCase =hidden_dropout_prob
__UpperCamelCase =attention_probs_dropout_prob
__UpperCamelCase =type_sequence_label_size
__UpperCamelCase =initializer_range
__UpperCamelCase =scope
__UpperCamelCase =backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
__UpperCamelCase =(self.image_size // 32) ** 2
__UpperCamelCase =num_patches + 1
def _a ( self ) -> str:
__UpperCamelCase =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =self.get_config()
return config, pixel_values, labels
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase ={
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [4, 8, 16, 32],
'num_groups': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=A_ , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=A_ , )
def _a ( self , A_ , A_ , A_ ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModel(config=A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.type_sequence_label_size
__UpperCamelCase =ViTHybridForImageClassification(A_ )
model.to(A_ )
model.eval()
__UpperCamelCase =model(A_ , labels=A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.prepare_config_and_inputs()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( A_ , A_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Union[str, Any] = (
{"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[str] = False
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =ViTHybridModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , has_text_modality=A_ , hidden_size=37 )
def _a ( self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _a ( self ) -> List[str]:
pass
def _a ( self ) -> List[Any]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCamelCase =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def _a ( self ) -> Optional[int]:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(A_ )
__UpperCamelCase =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCamelCase =[*signature.parameters.keys()]
__UpperCamelCase =['pixel_values']
self.assertListEqual(arg_names[:1] , A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> Union[str, Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
def _a ( self ) -> int:
__UpperCamelCase , __UpperCamelCase =self.model_tester.prepare_config_and_inputs_for_common()
__UpperCamelCase =_config_zero_init(A_ )
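        # _config_zero_init shrinks initializer_range to ~0, so freshly initialized weights should come out as 0.0
        # (or 1.0 for scale/LayerNorm parameters); any other value means a module skipped the standard init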
for model_class in self.all_model_classes:
__UpperCamelCase =model_class(config=A_ )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
__UpperCamelCase =[f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _a ( self ) -> int:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCamelCase =ViTHybridModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCAmelCase ( ):
__UpperCamelCase =Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _a ( self ) -> Union[str, Any]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _a ( self ) -> str:
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
A_ )
__UpperCamelCase =self.default_image_processor
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' ).to(A_ )
# forward pass
with torch.no_grad():
__UpperCamelCase =model(**A_ )
# verify the logits
__UpperCamelCase =torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , A_ )
__UpperCamelCase =torch.tensor([-1.9090, -0.4993, -0.2389] ).to(A_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A_ , atol=1E-4 ) )
@slow
@require_accelerate
def _a ( self ) -> Optional[int]:
__UpperCamelCase =ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
__UpperCamelCase =ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
__UpperCamelCase =prepare_img()
__UpperCamelCase =image_processor(images=A_ , return_tensors='pt' )
__UpperCamelCase =model(**A_ )
__UpperCamelCase =outputs.logits
# model predicts one of the 1000 ImageNet classes
__UpperCamelCase =logits.argmax(-1 ).item()
        self.assertEqual(model.config.idalabel[predicted_class_idx] , 'tabby, tabby cat' )
| 682 | 0 |
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self , A_ , A_=13 , A_=7 , A_=True , A_=True , A_=True , A_=True , A_=99 , A_=32 , A_=2 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ) -> int:
__UpperCamelCase =parent
__UpperCamelCase =13
__UpperCamelCase =7
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =True
__UpperCamelCase =99
__UpperCamelCase =32
__UpperCamelCase =2
__UpperCamelCase =4
__UpperCamelCase =37
__UpperCamelCase ='gelu'
__UpperCamelCase =0.1
__UpperCamelCase =0.1
__UpperCamelCase =512
__UpperCamelCase =16
__UpperCamelCase =2
__UpperCamelCase =0.02
__UpperCamelCase =3
__UpperCamelCase =4
__UpperCamelCase =None
def _a ( self ) -> Any:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCamelCase =None
if self.use_input_mask:
__UpperCamelCase =random_attention_mask([self.batch_size, self.seq_length] )
__UpperCamelCase =None
if self.use_token_type_ids:
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCamelCase =None
__UpperCamelCase =None
__UpperCamelCase =None
if self.use_labels:
__UpperCamelCase =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCamelCase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCamelCase =ids_tensor([self.batch_size] , self.num_choices )
__UpperCamelCase =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=A_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
__UpperCamelCase =TFRoFormerModel(config=A_ )
__UpperCamelCase ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__UpperCamelCase =[input_ids, input_mask]
__UpperCamelCase =model(A_ )
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =True
__UpperCamelCase =TFRoFormerForCausalLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Any:
__UpperCamelCase =TFRoFormerForMaskedLM(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Dict:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForSequenceClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =self.num_choices
__UpperCamelCase =TFRoFormerForMultipleChoice(config=A_ )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase =tf.tile(tf.expand_dims(A_ , 1 ) , (1, self.num_choices, 1) )
__UpperCamelCase ={
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> str:
__UpperCamelCase =self.num_labels
__UpperCamelCase =TFRoFormerForTokenClassification(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _a ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ) -> Optional[int]:
__UpperCamelCase =TFRoFormerForQuestionAnswering(config=A_ )
__UpperCamelCase ={
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__UpperCamelCase =model(A_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _a ( self ) -> Dict:
__UpperCamelCase =self.prepare_config_and_inputs()
        __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase =config_and_inputs
__UpperCamelCase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase__ ( lowercase_ , lowercase_ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCAmelCase__ : List[Any] = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Union[str, Any] = False
def _a ( self , A_ , A_ , A_ , A_ , A_ ) -> Tuple:
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def _a ( self ) -> Optional[int]:
__UpperCamelCase =TFRoFormerModelTester(self )
__UpperCamelCase =ConfigTester(self , config_class=A_ , hidden_size=37 )
def _a ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _a ( self ) -> str:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _a ( self ) -> List[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*A_ )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A_ )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A_ )
def _a ( self ) -> Optional[int]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A_ )
def _a ( self ) -> Optional[Any]:
__UpperCamelCase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _a ( self ) -> Any:
__UpperCamelCase =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
self.assertIsNotNone(A_ )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _a ( self ) -> List[str]:
__UpperCamelCase =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
__UpperCamelCase =tf.constant([[0, 1, 2, 3, 4, 5]] )
__UpperCamelCase =model(A_ )[0]
# TODO Replace vocab size
__UpperCamelCase =50000
__UpperCamelCase =[1, 6, vocab_size]
self.assertEqual(output.shape , A_ )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__UpperCamelCase =tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , A_ , atol=1E-4 )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Dict = 1e-4
def _a ( self ) -> Any:
__UpperCamelCase =tf.constant([[4, 10]] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
__UpperCamelCase =emba(input_ids.shape )
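        # row p, column i of the table is sin(p / 10000 ** (2 * i / dim)) for the first dim // 2 columns and the matching
        # cosine for the rest; with dim=6 that gives sin(1) ~= 0.8415 and cos(1) ~= 0.5403 in the second row below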
__UpperCamelCase =tf.constant(
[[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
def _a ( self ) -> List[str]:
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
[0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
[0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
] )
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
__UpperCamelCase =emba.weight[:3, :5]
tf.debugging.assert_near(A_ , A_ , atol=self.tolerance )
@require_tf
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : str = 1e-4
def _a ( self ) -> Union[str, Any]:
# 2,12,16,64
__UpperCamelCase =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
__UpperCamelCase =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
__UpperCamelCase =embed_positions([2, 16, 768] )[None, None, :, :]
__UpperCamelCase , __UpperCamelCase =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
A_ , A_ , A_ )
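        # rotary embeddings rotate every (even, odd) feature pair by a position-dependent angle theta:
        # x' = x * cos(theta) + rotate_half(x) * sin(theta), which is what the expected tensors below encode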
__UpperCamelCase =tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__UpperCamelCase =tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , A_ , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , A_ , atol=self.tolerance ) | 720 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : LevitConfig , SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : bool = True ):
print(F'Converting {name}...' )
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__UpperCamelCase =timm.create_model('levit_128s' , pretrained=SCREAMING_SNAKE_CASE__ )
else:
__UpperCamelCase =timm.create_model('levit_128' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 1_92:
__UpperCamelCase =timm.create_model('levit_192' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 2_56:
__UpperCamelCase =timm.create_model('levit_256' , pretrained=SCREAMING_SNAKE_CASE__ )
if hidden_sizes == 3_84:
__UpperCamelCase =timm.create_model('levit_384' , pretrained=SCREAMING_SNAKE_CASE__ )
from_model.eval()
__UpperCamelCase =LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__ ).eval()
__UpperCamelCase =OrderedDict()
__UpperCamelCase =from_model.state_dict()
__UpperCamelCase =list(from_model.state_dict().keys() )
__UpperCamelCase =list(our_model.state_dict().keys() )
print(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
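        # the two state dicts are aligned purely by position: both models enumerate their parameters in the same
        # creation order, so weights[og_keys[i]] is copied into the i-th HF key without any name mapping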
for i in range(len(SCREAMING_SNAKE_CASE__ ) ):
__UpperCamelCase =weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =torch.randn((2, 3, 2_24, 2_24) )
__UpperCamelCase =from_model(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =our_model(SCREAMING_SNAKE_CASE__ ).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ), "The model logits don't match the original one."
__UpperCamelCase =name
print(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
__UpperCamelCase =LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'Pushed {checkpoint_name}' )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Path , SCREAMING_SNAKE_CASE__ : str = None , SCREAMING_SNAKE_CASE__ : bool = True ):
__UpperCamelCase ='imagenet-1k-id2label.json'
__UpperCamelCase =10_00
__UpperCamelCase =(1, num_labels)
__UpperCamelCase ='huggingface/label-files'
__UpperCamelCase =num_labels
__UpperCamelCase =json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='dataset' ) , 'r' ) )
__UpperCamelCase ={int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
__UpperCamelCase =idalabel
__UpperCamelCase ={v: k for k, v in idalabel.items()}
__UpperCamelCase =partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__ )
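    # the partial pre-binds num_labels and the label maps, so each config below only has to supply architecture sizes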
__UpperCamelCase ={
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
__UpperCamelCase ={
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
        help='The name of the model you wish to convert; it must be one of the supported Levit* architectures.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 682 | 0 |
from __future__ import annotations
import requests
_A = set(
'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split()
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[int] = 1 , SCREAMING_SNAKE_CASE__ : Optional[Any] = "new" , SCREAMING_SNAKE_CASE__ : Tuple = None ):
__UpperCamelCase =wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(lowerCamelCase__ ) - valid_terms ) ):
__UpperCamelCase =F'Invalid search term: {invalid_search_terms}'
raise ValueError(lowerCamelCase__ )
__UpperCamelCase =requests.get(
F'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={'User-agent': 'A random string'} , )
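    # reddit answers 429 (Too Many Requests) for the default python-requests user agent, hence the custom header above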
if response.status_code == 4_29:
raise requests.HTTPError
__UpperCamelCase =response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(lowerCamelCase__ )}
__UpperCamelCase ={}
for id_ in range(lowerCamelCase__ ):
__UpperCamelCase ={
item: data['data']['children'][id_]['data'][item] for item in wanted_data
}
return data_dict
if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited. Try again after some time
print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
| 721 |
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def _a ( self ) -> Any:
__UpperCamelCase ='laion/clap-htsat-unfused'
__UpperCamelCase =tempfile.mkdtemp()
def _a ( self , **A_ ) -> List[Any]:
return RobertaTokenizer.from_pretrained(self.checkpoint , **A_ )
def _a ( self , **A_ ) -> Dict:
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **A_ )
def _a ( self ) -> int:
shutil.rmtree(self.tmpdirname )
def _a ( self ) -> str:
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> int:
__UpperCamelCase =ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
__UpperCamelCase =self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__UpperCamelCase =self.get_feature_extractor(do_normalize=A_ , padding_value=1.0 )
__UpperCamelCase =ClapProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , A_ )
def _a ( self ) -> str:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =floats_list((3, 1000) )
__UpperCamelCase =feature_extractor(A_ , return_tensors='np' )
__UpperCamelCase =processor(audios=A_ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a ( self ) -> int:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase ='This is a test string'
__UpperCamelCase =processor(text=A_ )
__UpperCamelCase =tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _a ( self ) -> List[str]:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
__UpperCamelCase =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__UpperCamelCase =processor.batch_decode(A_ )
__UpperCamelCase =tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
def _a ( self ) -> Tuple:
__UpperCamelCase =self.get_feature_extractor()
__UpperCamelCase =self.get_tokenizer()
__UpperCamelCase =ClapProcessor(tokenizer=A_ , feature_extractor=A_ )
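        # processor.model_input_names concatenates the tokenizer names ('input_ids', 'attention_mask') with the
        # feature extractor names, so slicing off the first two leaves exactly the feature extractor inputs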
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
| 682 | 0 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class UpperCAmelCase__ ( A_ ):
"""simple docstring"""
def _a ( self , A_ ) -> List[Any]:
with open(A_ , encoding='utf-8' ) as input_file:
__UpperCamelCase =re.compile(r'(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)' )
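            # the negative lookahead above skips open(...) calls that already pass an encoding or a binary/write
            # mode, so only bare text-mode open(...) calls are flagged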
__UpperCamelCase =input_file.read()
__UpperCamelCase =regexp.search(A_ )
return match
def _a ( self , A_ ) -> Dict:
with open(A_ , encoding='utf-8' ) as input_file:
__UpperCamelCase =re.compile(r'#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()' , re.DOTALL )
__UpperCamelCase =input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__UpperCamelCase =regexp.finditer(A_ )
__UpperCamelCase =[match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _a ( self ) -> Dict:
__UpperCamelCase =Path('./datasets' )
__UpperCamelCase =list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(A_ ) ):
raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' )
def _a ( self ) -> List[str]:
__UpperCamelCase =Path('./datasets' )
__UpperCamelCase =list(dataset_paths.absolute().glob('**/*.py' ) )
for dataset in dataset_files:
if self._no_print_statements(str(A_ ) ):
raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
| 700 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[Any]=None ):
if subparsers is not None:
__UpperCamelCase =subparsers.add_parser('test' )
else:
__UpperCamelCase =argparse.ArgumentParser('Accelerate test command' )
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE__ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE__ )
return parser
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any ):
__UpperCamelCase =os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['test_utils', 'scripts', 'test_script.py'] )
if args.config_file is None:
__UpperCamelCase =script_name
else:
__UpperCamelCase =F'--config_file={args.config_file} {script_name}'
__UpperCamelCase =['accelerate-launch'] + test_args.split()
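    # runs the bundled sanity-check script through accelerate-launch so the saved config is exercised end to end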
__UpperCamelCase =execute_subprocess_async(SCREAMING_SNAKE_CASE__ , env=os.environ.copy() )
if result.returncode == 0:
print('Test is a success! You are ready for your distributed training!' )
def _UpperCAmelCase ( ):
__UpperCamelCase =test_command_parser()
__UpperCamelCase =parser.parse_args()
test_command(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 682 | 0 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
_A = logging.getLogger(__name__)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
    # remove any stale config/weights in the output directory, then save the model
if os.path.exists(SCREAMING_SNAKE_CASE__ ):
if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , 'config.json' ) ) and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE__ , 'config.json' ) ):
os.remove(os.path.join(SCREAMING_SNAKE_CASE__ , 'config.json' ) )
if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , 'pytorch_model.bin' ) ) and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE__ , 'pytorch_model.bin' ) ):
os.remove(os.path.join(SCREAMING_SNAKE_CASE__ , 'pytorch_model.bin' ) )
else:
os.makedirs(SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=False ):
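    # Shannon entropy -sum(p * log p) over the last dimension; when the second flag is set, p is squared first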
__UpperCamelCase =2
if unlogit:
__UpperCamelCase =torch.pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =p * torch.log(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =0
return -plogp.sum(dim=-1 )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
logger.info('lv, h >\t' + '\t'.join(F'{x + 1}' for x in range(len(SCREAMING_SNAKE_CASE__ ) ) ) )
for row in range(len(SCREAMING_SNAKE_CASE__ ) ):
if tensor.dtype != torch.long:
logger.info(F'layer {row + 1}:\t' + '\t'.join(F'{x:.5f}' for x in tensor[row].cpu().data ) )
else:
logger.info(F'layer {row + 1}:\t' + '\t'.join(F'{x:d}' for x in tensor[row].cpu().data ) )
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : int=True , SCREAMING_SNAKE_CASE__ : Any=None , SCREAMING_SNAKE_CASE__ : List[str]=False ):
__UpperCamelCase , __UpperCamelCase =model.config.num_hidden_layers, model.config.num_attention_heads
__UpperCamelCase =torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).to(args.device )
__UpperCamelCase =torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).to(args.device )
if head_mask is None:
__UpperCamelCase =torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).to(args.device )
head_mask.requires_grad_(requires_grad=SCREAMING_SNAKE_CASE__ )
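    # the head mask needs gradients: the absolute gradient of the loss w.r.t. each mask entry is accumulated below
    # as a first-order importance score for the corresponding attention head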
    # if the attention heads were actually pruned, drop the head mask to avoid a shape mismatch
if actually_pruned:
__UpperCamelCase =None
__UpperCamelCase =0.0
__UpperCamelCase =0.0
for step, inputs in enumerate(tqdm(SCREAMING_SNAKE_CASE__ , desc='Iteration' , disable=args.local_rank not in [-1, 0] ) ):
__UpperCamelCase =tuple(t.to(args.device ) for t in inputs )
((__UpperCamelCase ) , ) =inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__UpperCamelCase =model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ )
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =(
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =entropy(attn.detach() , SCREAMING_SNAKE_CASE__ )
attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(SCREAMING_SNAKE_CASE__ ).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__UpperCamelCase =2
__UpperCamelCase =torch.pow(torch.pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).sum(-1 ) , 1 / exponent )
head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20
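        # with exponent 2 the division above is by each layer's L2 norm of head scores, so layers with uniformly
        # large gradients cannot dominate the global ranking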
if not args.dont_normalize_global_importance:
__UpperCamelCase =(head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info('Attention entropies' )
print_ad_tensor(SCREAMING_SNAKE_CASE__ )
if compute_importance:
logger.info('Head importance scores' )
print_ad_tensor(SCREAMING_SNAKE_CASE__ )
logger.info('Head ranked by importance scores' )
__UpperCamelCase =torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device )
__UpperCamelCase =torch.arange(
head_importance.numel() , device=args.device )
__UpperCamelCase =head_ranks.view_as(SCREAMING_SNAKE_CASE__ )
print_ad_tensor(SCREAMING_SNAKE_CASE__ )
return attn_entropy, head_importance, total_loss
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : List[Any] ):
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =compute_heads_importance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , SCREAMING_SNAKE_CASE__ , original_score * args.masking_threshold )
__UpperCamelCase =torch.ones_like(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =max(1 , int(new_head_mask.numel() * args.masking_amount ) )
__UpperCamelCase =original_score
while current_score >= original_score * args.masking_threshold:
__UpperCamelCase =new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__UpperCamelCase =float('Inf' )
__UpperCamelCase =head_importance.view(-1 ).sort()[1]
if len(SCREAMING_SNAKE_CASE__ ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
__UpperCamelCase =current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
__UpperCamelCase =new_head_mask.view(-1 )
__UpperCamelCase =0.0
__UpperCamelCase =new_head_mask.view_as(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =new_head_mask.clone().detach()
print_ad_tensor(SCREAMING_SNAKE_CASE__ )
# Compute metric and head importance again
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =compute_heads_importance(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , SCREAMING_SNAKE_CASE__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 1_00 , )
logger.info('Final head mask' )
print_ad_tensor(SCREAMING_SNAKE_CASE__ )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =datetime.now()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =compute_heads_importance(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ , compute_importance=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =1 / loss
__UpperCamelCase =datetime.now() - before_time
__UpperCamelCase =sum(p.numel() for p in model.parameters() )
__UpperCamelCase ={
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(SCREAMING_SNAKE_CASE__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__UpperCamelCase =[
v,
]
assert sum(len(SCREAMING_SNAKE_CASE__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =sum(p.numel() for p in model.parameters() )
__UpperCamelCase =datetime.now()
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase =compute_heads_importance(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ , compute_importance=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ , actually_pruned=SCREAMING_SNAKE_CASE__ , )
__UpperCamelCase =1 / loss
__UpperCamelCase =datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , pruned_num_params / original_num_params * 1_00 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 1_00 )
save_model(SCREAMING_SNAKE_CASE__ , args.output_dir )
def _UpperCAmelCase ( ):
__UpperCamelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=SCREAMING_SNAKE_CASE__ , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=SCREAMING_SNAKE_CASE__ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=SCREAMING_SNAKE_CASE__ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
'--masking_threshold' , default=0.9 , type=SCREAMING_SNAKE_CASE__ , help='masking threshold in term of metrics (stop masking when metric < threshold * original metric value).' , )
parser.add_argument(
'--masking_amount' , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help='Amount to heads to masking at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=SCREAMING_SNAKE_CASE__ , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=1_28 , type=SCREAMING_SNAKE_CASE__ , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=SCREAMING_SNAKE_CASE__ , help='Batch size.' )
parser.add_argument('--seed' , type=SCREAMING_SNAKE_CASE__ , default=42 )
parser.add_argument('--local_rank' , type=SCREAMING_SNAKE_CASE__ , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=SCREAMING_SNAKE_CASE__ , default='' , help='Can be used for distant debugging.' )
__UpperCamelCase =parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__UpperCamelCase =torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
__UpperCamelCase =0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
__UpperCamelCase =torch.device('cuda' , args.local_rank )
__UpperCamelCase =1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
__UpperCamelCase =GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
__UpperCamelCase =nn.parallel.DistributedDataParallel(
SCREAMING_SNAKE_CASE__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=SCREAMING_SNAKE_CASE__ )
elif args.n_gpu > 1:
__UpperCamelCase =nn.DataParallel(SCREAMING_SNAKE_CASE__ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE__ )
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , SCREAMING_SNAKE_CASE__ )
# Prepare dataset
__UpperCamelCase =np.concatenate(
[
np.loadtxt(args.data_dir , dtype=np.intaa ),
] )
__UpperCamelCase =(torch.from_numpy(SCREAMING_SNAKE_CASE__ ),)
__UpperCamelCase =TensorDataset(*SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =RandomSampler(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__UpperCamelCase =mask_heads(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
prune_heads(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 701 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from tax import checkpoints
from transformers import (
AutoTokenizer,
PixaStructConfig,
PixaStructForConditionalGeneration,
PixaStructImageProcessor,
PixaStructProcessor,
PixaStructTextConfig,
PixaStructVisionConfig,
)
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : int ):
__UpperCamelCase =checkpoints.load_tax_checkpoint(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flatten_dict(SCREAMING_SNAKE_CASE__ )
return flax_params
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] ):
__UpperCamelCase ={}
__UpperCamelCase ={
'token_embedder': 'embeddings',
'encoder_norm': 'layernorm',
'kernel': 'weight',
'.out': '.output',
'scale': 'weight',
'embedders_0.pos_embedding': 'row_embedder.weight',
'embedders_1.pos_embedding': 'column_embedder.weight',
}
__UpperCamelCase ={
'query': 'attention.query',
'key': 'attention.key',
'value': 'attention.value',
'output.dense': 'output',
'encoder_decoder_attention.o': 'encoder_decoder_attention.attention.o',
'pre_self_attention_layer_norm': 'self_attention.layer_norm',
'pre_cross_attention_layer_norm': 'encoder_decoder_attention.layer_norm',
'mlp.': 'mlp.DenseReluDense.',
'pre_mlp_layer_norm': 'mlp.layer_norm',
'self_attention.o': 'self_attention.attention.o',
'decoder.embeddings.embedding': 'decoder.embed_tokens.weight',
'decoder.relpos_bias.rel_embedding': 'decoder.layer.0.self_attention.attention.relative_attention_bias.weight',
'decoder.decoder_norm.weight': 'decoder.final_layer_norm.weight',
'decoder.logits_dense.weight': 'decoder.lm_head.weight',
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
__UpperCamelCase ='.'.join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
__UpperCamelCase =new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =new_key.replace('encoder' , 'encoder.encoder' )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
__UpperCamelCase =re.sub(r'layers_(\d+)' , r'layer.\1' , SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =flax_dict[key]
__UpperCamelCase ={}
# convert converted_dict into torch format
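    # Flax stores Dense kernels as (in_features, out_features) while torch.nn.Linear expects (out_features,
    # in_features), so every non-embedding matrix is transposed on the way in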
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
__UpperCamelCase =torch.from_numpy(converted_dict[key].T )
else:
__UpperCamelCase =torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def _UpperCAmelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Tuple=False , SCREAMING_SNAKE_CASE__ : str=False ):
__UpperCamelCase =get_flax_param(SCREAMING_SNAKE_CASE__ )
if not use_large:
__UpperCamelCase =PixaStructVisionConfig()
__UpperCamelCase =PixaStructTextConfig()
else:
__UpperCamelCase =PixaStructVisionConfig(
hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 )
__UpperCamelCase =PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 )
__UpperCamelCase =PixaStructConfig(
vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =PixaStructForConditionalGeneration(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =rename_and_convert_flax_params(SCREAMING_SNAKE_CASE__ )
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__UpperCamelCase =AutoTokenizer.from_pretrained('ybelkada/test-pix2struct-tokenizer' )
__UpperCamelCase =PixaStructImageProcessor()
__UpperCamelCase =PixaStructProcessor(image_processor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
if use_large:
__UpperCamelCase =40_96
__UpperCamelCase =True
# mkdir if needed
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
print('Model saved in {}'.format(SCREAMING_SNAKE_CASE__ ) )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.')
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--use_large', action='store_true', help='Use large model.')
    parser.add_argument('--is_vqa', action='store_true', help='Convert a VQA checkpoint.')
_A = parser.parse_args()
convert_pixastruct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
| 682 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_A = {
'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'],
'tokenization_xlm': ['XLMTokenizer'],
}
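# the map above pairs each submodule with the public names it exports; _LazyModule uses it to defer the heavy
# torch/TF imports until one of these attributes is first accessed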
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_A = {
'configuration_trocr': ['TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TrOCRConfig'],
'processing_trocr': ['TrOCRProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
'TROCR_PRETRAINED_MODEL_ARCHIVE_LIST',
'TrOCRForCausalLM',
'TrOCRPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
_A = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 682 | 0 |