| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 87–55.2k) | int64 (0–349) | string (lengths 135–49.1k) | int64 (0–349) | int64 (0–1) |
"""simple docstring"""
from collections.abc import Sequence
def _A ( lowercase , lowercase = False ):
"""simple docstring"""
if not arr:
return 0
a =0 if allow_empty_subarrays else float('''-inf''' )
a =0.0
for num in arr:
a =max(0 if allow_empty_subarrays else num , curr_sum + num )
a =max(lowercase , lowercase )
return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
lowerCamelCase_ : List[str] = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(F'{max_subarray_sum(nums) = }') | 81 |
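For reference, a quick way to validate the function above is to cross-check it against an obviously correct brute force. The helper below (`brute_force_max_subarray`) is our own illustration, not part of the original snippet, and assumes `max_subarray_sum` from above is in scope:

```python
import random


def brute_force_max_subarray(arr: list[int]) -> float:
    """O(n^2) reference: try every contiguous (start, end) range."""
    best = float("-inf")
    for start in range(len(arr)):
        total = 0
        for end in range(start, len(arr)):
            total += arr[end]
            best = max(best, total)
    return best


for _ in range(100):
    data = [random.randint(-10, 10) for _ in range(random.randint(1, 20))]
    assert max_subarray_sum(data) == brute_force_max_subarray(data)
```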
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module with a low-rank adapter; used by the training test below."""

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
    # We need to test on relatively large models (i.e. >1b parameters, otherwise the quantization may not work as expected)
    # Therefore here we use only bloom-1b7 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Models and tokenizer
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
__lowercase = config.to_dict()
__lowercase = config.to_diff_dict()
__lowercase = config.to_json_string()
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
__lowercase = self.model_fpaa.get_memory_footprint()
__lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
__lowercase = True
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_UpperCAmelCase ):
            # Tries with a `dtype`
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_UpperCAmelCase ):
            # Tries with `float()`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
            # Tries with `half()`
self.model_abit.half()
# Test if we did not break anything
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_fpaa.to(torch.floataa )
__lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowercase = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__lowercase = self.model_fpaa.half()
# Check this does not throw an error
__lowercase = self.model_fpaa.float()
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
@classmethod
def a__ ( cls : int ) -> Tuple:
"""simple docstring"""
__lowercase = 't5-small'
__lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase = AutoTokenizer.from_pretrained(cls.model_name )
__lowercase = 'Translate in German: Hello, my dog is cute'
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : int ) -> int:
"""simple docstring"""
from transformers import TaForConditionalGeneration
__lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase = None
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
__lowercase = modules
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().setUp()
# model_name
__lowercase = 'bigscience/bloom-560m'
__lowercase = 't5-small'
# Different types of model
__lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Sequence classification model
__lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# CausalLM model
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Seq2seq model
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'facebook/opt-350m'
super().setUp()
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
__lowercase = LoRALayer(module.q_proj , rank=16 )
__lowercase = LoRALayer(module.k_proj , rank=16 )
__lowercase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( lowerCAmelCase__ ):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
| 325 | 0 |
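The test row above exercises the transformers 4-bit quantization path with obfuscated identifiers (`load_in_abit`, `torch.floataa`, `AutoModelForSeqaSeqLM` stand in for `load_in_4bit`, `torch.float16`, `AutoModelForSeq2SeqLM`). Below is a minimal de-obfuscated sketch of the pattern under test, using only the public transformers/bitsandbytes API; it requires a CUDA GPU with bitsandbytes installed:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "bigscience/bloom-1b7"
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",  # NF4 quantization, as in the quant-type test above
    bnb_4bit_compute_dtype=torch.float16,
)

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name, quantization_config=quantization_config, device_map="auto"
)

inputs = tokenizer("Hello my name is", return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```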
def is_automorphic_number(number: int) -> bool:
    """
    Return True if `number` is automorphic, i.e. its square ends in the number itself.

    >>> is_automorphic_number(76)
    True
    >>> is_automorphic_number(7)
    False
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 82 |
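A few spot checks of the predicate above on known automorphic numbers (5² = 25, 76² = 5776, 376² = 141376 all end in the original number):

```python
for n in (0, 1, 5, 6, 25, 76, 376, 890625):
    assert is_automorphic_number(n)
for n in (2, 3, 7, 10, 34):
    assert not is_automorphic_number(n)
```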
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
def __init__( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : str=5_12 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : List[Any]=None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = 13
__lowercase = 7
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 99
__lowercase = 3_84
__lowercase = 2
__lowercase = 4
__lowercase = 37
__lowercase = 'gelu'
__lowercase = 0.1
__lowercase = 0.1
__lowercase = 5_12
__lowercase = 16
__lowercase = 2
__lowercase = 0.02
__lowercase = 3
__lowercase = 4
__lowercase = 1_28
__lowercase = 2
__lowercase = 9
__lowercase = 1
__lowercase = None
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = TFConvBertModel(config=_UpperCAmelCase )
__lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase = [input_ids, input_mask]
__lowercase = model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = TFConvBertForMaskedLM(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFConvBertForTokenClassification(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ : List[str] = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : int = False
lowerCAmelCase__ : List[str] = False
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = TFConvBertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = True
if hasattr(_UpperCAmelCase , 'use_cache' ):
__lowercase = True
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
for model_class in self.all_model_classes:
__lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = model_class(_UpperCAmelCase )
__lowercase = len(model(_UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
__lowercase = os.path.join(_UpperCAmelCase , 'saved_model' , '1' )
__lowercase = tf.keras.models.load_model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = outputs['encoder_hidden_states']
__lowercase = outputs['encoder_attentions']
else:
__lowercase = outputs['hidden_states']
__lowercase = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
__lowercase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
def check_decoder_attentions_output(_UpperCAmelCase : int ):
__lowercase = len(_UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
__lowercase = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ):
__lowercase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__lowercase = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase = model(_UpperCAmelCase )[0]
__lowercase = [1, 6, 7_68]
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 )
| 325 | 0 |
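For reference, the integration check at the end of the row above corresponds to the following cleaned-up snippet; the expected slice values are taken verbatim from the test:

```python
import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
output = model(input_ids)[0]

assert output.shape == (1, 6, 768)  # [batch, sequence, hidden_size]

expected_slice = tf.constant(
    [
        [
            [-0.03475493, -0.4686034, -0.30638832],
            [0.22637248, -0.26988646, -0.7423424],
            [0.10324868, -0.45013508, -0.58280784],
        ]
    ]
)
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
```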
"""Plot benchmark results from a csv file with columns: model, batch_size, sequence_length, result."""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional

import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter

from transformers import HfArgumentParser


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)


@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )


def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False


class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])

    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"

        if not self.args.no_log_scale:
            # set logarithm scales
            ax.set_xscale("log")
            ax.set_yscale("log")

        for axis in [ax.xaxis, ax.yaxis]:
            axis.set_major_formatter(ScalarFormatter())

        for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )

            for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")

            title_str += f" {label_model_name} vs."

        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
        plt.legend()

        if self.args.figure_png_file is not None:
            plt.savefig(self.args.figure_png_file)
        else:
            plt.show()


def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
| 83 |
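Assuming the de-obfuscated names used above, the script can also be driven programmatically rather than through `HfArgumentParser`. The CSV content and file names below are illustrative only:

```python
import csv

rows = [
    {"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 32, "result": 1330},
    {"model": "bert-base-uncased", "batch_size": 8, "sequence_length": 128, "result": 1765},
]
with open("inference_memory.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["model", "batch_size", "sequence_length", "result"])
    writer.writeheader()
    writer.writerows(rows)

args = PlotArguments(csv_file="inference_memory.csv", figure_png_file="inference_memory.png")
Plot(args=args).plot()
```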
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings

from .state import AcceleratorState, GradientState

warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")


class AcceleratedScheduler:
    """
    A wrapper around a learning-rate scheduler that only steps when the wrapped optimizer(s) actually performed an
    optimization step, e.g. to avoid stepping the scheduler while gradients are still being accumulated or when a
    step was skipped because of an overflow in mixed precision.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
| 325 | 0 |
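A toy illustration of the wrapper's contract, assuming the de-obfuscated `AcceleratedScheduler` above (importable as `accelerate.scheduler.AcceleratedScheduler`). In real training, accelerate builds this wrapper for you via `accelerator.prepare(...)`; the manual construction here only demonstrates step forwarding:

```python
import torch
from accelerate.scheduler import AcceleratedScheduler

param = torch.nn.Parameter(torch.zeros(1))
optimizer = torch.optim.SGD([param], lr=0.1)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.5)

# With step_with_optimizer=False the wrapper forwards every .step() call
# straight to the wrapped scheduler, regardless of gradient accumulation state.
wrapped = AcceleratedScheduler(scheduler, optimizer, step_with_optimizer=False)
for _ in range(4):
    optimizer.step()
    wrapped.step()
print(wrapped.get_last_lr())  # lr halves every 2 steps: [0.025]
```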
"""simple docstring"""
from __future__ import annotations
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A ) -> List[Any]:
lowerCAmelCase_ , lowerCAmelCase_ :List[Any] = text, pattern
lowerCAmelCase_ , lowerCAmelCase_ :List[str] = len(__A ), len(__A )
def __lowerCAmelCase ( self , __A ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def __lowerCAmelCase ( self , __A ) -> int:
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def __lowerCAmelCase ( self ) -> list[int]:
# searches pattern in text and returns index positions
lowerCAmelCase_ :List[str] = []
for i in range(self.textLen - self.patLen + 1 ):
lowerCAmelCase_ :Any = self.mismatch_in_text(__A )
if mismatch_index == -1:
positions.append(__A )
else:
lowerCAmelCase_ :int = self.match_in_pattern(self.text[mismatch_index] )
lowerCAmelCase_ :Any = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
__UpperCAmelCase = 'ABAABA'
__UpperCAmelCase = 'AB'
__UpperCAmelCase = BoyerMooreSearch(text, pattern)
__UpperCAmelCase = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
| 84 |
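A few extra checks of the search above (occurrences of "AB" in "ABAABA" start at indices 0 and 3):

```python
assert BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic() == [0, 3]
assert BoyerMooreSearch("ABAABA", "ABC").bad_character_heuristic() == []
assert BoyerMooreSearch("AAAA", "AA").bad_character_heuristic() == [0, 1, 2]
```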
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE__ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None:
return None
__lowercase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowercase = f.readlines()
__lowercase = 0
while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
__lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ):
__lowercase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0]
__lowercase = re.findall(r'\[([^\]]+)\]' , SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
__lowercase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
__lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
__lowercase = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
__lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase = []
while (
line_index < len(SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : int ) -> int:
def find_duplicates(SCREAMING_SNAKE_CASE : Tuple ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowercase = []
for key in import_dict_objects.keys():
__lowercase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
__lowercase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowercase = 'base imports' if key == 'none' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def __SCREAMING_SNAKE_CASE ( ) -> Tuple:
__lowercase = []
for root, _, files in os.walk(SCREAMING_SNAKE_CASE ):
if "__init__.py" in files:
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' )
__lowercase = parse_init(SCREAMING_SNAKE_CASE )
if objects is not None:
__lowercase = analyze_results(*SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('\n'.join(SCREAMING_SNAKE_CASE ) )
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('\n\n'.join(SCREAMING_SNAKE_CASE ) )
def __SCREAMING_SNAKE_CASE ( ) -> Dict:
__lowercase = []
for path, directories, files in os.walk(SCREAMING_SNAKE_CASE ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / folder).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace(os.path.sep , '.' )
submodules.append(SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / fname).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE )
return submodules
SCREAMING_SNAKE_CASE__ = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def __SCREAMING_SNAKE_CASE ( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = importlib.util.spec_from_file_location(
'transformers' , os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
__lowercase = spec.loader.load_module()
__lowercase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = '\n'.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F"""{list_of_modules}\n"""
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 325 | 0 |
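The backend-detection helper at the top of the snippet can be demonstrated in isolation. The sketch below mirrors `find_backend` from transformers' `utils/check_inits.py` with raw-string regexes; the synthetic input lines are our own:

```python
import re

_re_backend = re.compile(r"is\_([a-z_]*)_available()")
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")


def find_backend(line):
    """Return the backend string for an `if not is_xxx_available()` line, else None."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


assert find_backend("if not is_torch_available():") == "torch"
assert find_backend("if not is_torch_available() and not is_vision_available():") == "torch_and_vision"
assert find_backend("from . import utils") is None
```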
"""Equivalent resistance of resistors combined in parallel and in series."""
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    """
    Req = 1 / (1/R1 + 1/R2 + ... + 1/Rn)

    >>> resistor_parallel([4.0, 4.0])
    2.0
    """
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    """
    Req = R1 + R2 + ... + Rn

    >>> resistor_series([4.0, 4.0])
    8.0
    """
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 85 |
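Worked examples for the two helpers above: two 4-ohm resistors give 1 / (1/4 + 1/4) = 2 ohms in parallel and simply add to 8 ohms in series:

```python
print(resistor_parallel([4.0, 4.0]))           # 2.0
print(resistor_series([4.0, 4.0]))             # 8.0
print(resistor_parallel([3.21389, 2.0, 3.0]))  # ~0.8738
```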
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """
    Adapter so that logging in a multi-process setup only happens where it should: by default only the main process
    logs, `main_process_only=False` logs on all processes, and `in_order=True` logs one rank at a time.
    """

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Return a `MultiProcessAdapter` for `name`, using `log_level` or the `ACCELERATE_LOG_LEVEL` env variable."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
| 325 | 0 |
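Typical use of the adapter goes through accelerate's public `get_logger` helper; the `main_process_only` and `in_order` flags below are exactly what `MultiProcessAdapter.log()` pops from its kwargs:

```python
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes PartialState, required before logging
logger = get_logger(__name__, log_level="INFO")

logger.info("printed once, on the main process only")
logger.info("printed on every process", main_process_only=False)
logger.info("printed by each process in rank order", main_process_only=False, in_order=True)
```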
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
lowerCamelCase__ = logging.get_logger(__name__)
# General docstring
lowerCamelCase__ = """ResNetConfig"""
# Base docstring
lowerCamelCase__ = """microsoft/resnet-50"""
lowerCamelCase__ = [1, 2_048, 7, 7]
# Image classification docstring
lowerCamelCase__ = """microsoft/resnet-50"""
lowerCamelCase__ = """tiger cat"""
lowerCamelCase__ = [
"""microsoft/resnet-50""",
# See all resnet models at https://huggingface.co/models?filter=resnet
]
class A__ ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 3 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = "relu" ):
super().__init__()
__lowerCAmelCase : Optional[int] = nn.Convad(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , padding=kernel_size // 2 , bias=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = nn.BatchNormad(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = ACTaFN[activation] if activation is not None else nn.Identity()
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = self.convolution(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self.normalization(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = self.activation(_SCREAMING_SNAKE_CASE )
return hidden_state
class A__ ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE ):
super().__init__()
__lowerCAmelCase : Dict = ResNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
__lowerCAmelCase : Dict = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
__lowerCAmelCase : int = config.num_channels
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
__lowerCAmelCase : List[str] = self.embedder(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = self.pooler(_SCREAMING_SNAKE_CASE )
return embedding
class A__ ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 2 ):
super().__init__()
__lowerCAmelCase : Optional[Any] = nn.Convad(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , kernel_size=1 , stride=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = nn.BatchNormad(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.convolution(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.normalization(_SCREAMING_SNAKE_CASE )
return hidden_state
class A__ ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = "relu" ):
super().__init__()
__lowerCAmelCase : Any = in_channels != out_channels or stride != 1
__lowerCAmelCase : Any = (
ResNetShortCut(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE ) if should_apply_shortcut else nn.Identity()
)
__lowerCAmelCase : List[str] = nn.Sequential(
ResNetConvLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE ) , ResNetConvLayer(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , activation=_SCREAMING_SNAKE_CASE ) , )
__lowerCAmelCase : Optional[int] = ACTaFN[activation]
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = hidden_state
__lowerCAmelCase : List[str] = self.layer(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = self.shortcut(_SCREAMING_SNAKE_CASE )
hidden_state += residual
__lowerCAmelCase : Union[str, Any] = self.activation(_SCREAMING_SNAKE_CASE )
return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, activation="relu", reduction=4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = 'resnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
RESNET_INPUTS_DOCSTRING = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    'The bare ResNet model outputting raw features without any specific head on top.',
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality='vision',
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    '\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ',
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    '\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ',
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
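# A minimal usage sketch for the classes above, going through the public
# `transformers` API (the `microsoft/resnet-50` checkpoint is illustrative):
#
#   from transformers import AutoImageProcessor, ResNetForImageClassification
#   from PIL import Image
#   import requests, torch
#
#   url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#   image = Image.open(requests.get(url, stream=True).raw)
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#   with torch.no_grad():
#       logits = model(**processor(images=image, return_tensors="pt")).logits
#   print(model.config.id2label[logits.argmax(-1).item()])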
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False
    use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False
    use_layerscale = True if 'large' in model_name or 'huge' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj', 'embeddings.patch_embeddings.projection')
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm', 'embeddings.norm')
    if "layers" in name:
        name = 'encoder.' + name
    if "encoder.layers" in name:
        name = name.replace('encoder.layers', 'encoder.stages')
    if "downsample.proj" in name:
        name = name.replace('downsample.proj', 'downsample.projection')
    if "blocks" in name:
        name = name.replace('blocks', 'layers')
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('modulation.f', 'modulation.projection_in')
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('modulation.h', 'modulation.projection_context')
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('modulation.proj', 'modulation.projection_out')
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head', 'classifier')
    else:
        name = 'focalnet.' + name
    return name
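# A few worked examples of rename_key's mapping (derived by tracing the
# branches above):
#   "patch_embed.proj.weight"
#       -> "focalnet.embeddings.patch_embeddings.projection.weight"
#   "layers.0.blocks.1.modulation.f.weight"
#       -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"
#   "head.weight" -> "classifier.weight"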
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
    checkpoint_url = model_name_to_url[model_name]
    print('Checkpoint URL: ', checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')['model']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val
    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()
    # load state dict
    model.load_state_dict(state_dict)
# verify conversion
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    processor = BitImageProcessor(
        do_resize=True,
        size={'shortest_edge': 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors='pt')
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1E-4)
    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print('Predicted class:', model.config.id2label[predicted_class_idx])
    print('First values of logits:', outputs.logits[0, :3])
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2_166, -0.4_368, 0.2_191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1_669, 0.0_125, -0.1_695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4_917, -0.0_430, 0.1_341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2_588, -0.5_342, -0.2_331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1_655, -0.4_090, -0.1_730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5_306, -0.0_483, -0.3_928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4)
print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print(F"""Pushing model and processor of {model_name} to the hub...""")
        model.push_to_hub(F"""{model_name}""")
        processor.push_to_hub(F"""{model_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
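# Example invocation (the script filename is illustrative):
#   python convert_focalnet_to_hf_format.py \
#       --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny \
#       --push_to_hub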
from __future__ import annotations
class Node:
    def __init__(self, data=None):
        self.data = data
        self.next = None
    def __repr__(self):
        string_rep = []
        temp = self
        while temp:
            string_rep.append(f"{temp.data}")
            temp = temp.next
        return "->".join(string_rep)
def make_linked_list(elements_list: list):
    if not elements_list:
        raise Exception("The Elements List is empty")
    current = head = Node(elements_list[0])
    for i in range(1, len(elements_list)):
        current.next = Node(elements_list[i])
        current = current.next
    return head
def print_reverse(head_node: Node):
    if head_node is not None and isinstance(head_node, Node):
        print_reverse(head_node.next)
        print(head_node.data)
def main():
    from doctest import testmod

    testmod()
    linked_list = make_linked_list([14, 52, 14, 12, 43])
    print("Linked List:")
    print(linked_list)
    print("Elements in Reverse:")
    print_reverse(linked_list)
if __name__ == "__main__":
main()
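# Expected console output of `main()` above:
#   Linked List:
#   14->52->14->12->43
#   Elements in Reverse:
#   43
#   12
#   14
#   52
#   14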
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = 'mask2former'
    backbones_supported = ['swin']
    attribute_map = {'hidden_size': 'hidden_dim'}

    def __init__(
        self,
        backbone_config=None,
        feature_size=256,
        mask_feature_size=256,
        hidden_dim=256,
        encoder_feedforward_dim=1024,
        activation_function='relu',
        encoder_layers=6,
        decoder_layers=10,
        num_attention_heads=8,
        dropout=0.0,
        dim_feedforward=2048,
        pre_norm=False,
        enforce_input_projection=False,
        common_stride=4,
        ignore_value=255,
        num_queries=100,
        no_object_weight=0.1,
        class_weight=2.0,
        mask_weight=5.0,
        dice_weight=5.0,
        train_num_points=12544,
        oversample_ratio=3.0,
        importance_sample_ratio=0.75,
        init_std=0.02,
        init_xavier_std=1.0,
        use_auxiliary_loss=True,
        feature_strides=[4, 8, 16, 32],
        output_auxiliary_logits=None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
            backbone_config = CONFIG_MAPPING['swin'](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=['stage1', 'stage2', 'stage3', 'stage4'],
            )
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop('model_type')
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
                f"""Supported model types: {",".join(self.backbones_supported)}""")
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
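# A quick sanity check of the config above: with no arguments it builds the
# default Swin backbone, and `to_dict` records the model type.
#
#   config = Mask2FormerConfig()
#   assert config.to_dict()["model_type"] == "mask2former"
#   assert config.backbone_config.model_type == "swin"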
from __future__ import annotations
def solve_maze(maze):
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("""\n""".join(str(row) for row in solutions))
    else:
        print("""No solution exists!""")
    return solved


def run_maze(maze, i, j, solutions):
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
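# A small worked example for the solver above (0 = free cell, 1 = wall):
#
#   maze = [
#       [0, 1, 0],
#       [0, 1, 0],
#       [0, 0, 0],
#   ]
#   solve_maze(maze)
#
# prints the visited-cell matrix row by row:
#   [1, 0, 0]
#   [1, 0, 0]
#   [1, 1, 1]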
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.""")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + 'Fast')}
    logger.info(F"""Loading tokenizer classes: {tokenizer_names}""")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""")
        for checkpoint in checkpoint_names:
            logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split('/')
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name)
            logger.info(F"""=> File names {file_names}""")
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json'):
                    os.remove(file_name)
                    logger.info(F"""=> removing {file_name}""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
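# Example invocation (tokenizer names are the keys of SLOW_TO_FAST_CONVERTERS,
# e.g. "BertTokenizer"; the script filename is illustrative):
#   python convert_slow_tokenizers_checkpoints_to_fast.py \
#       --dump_path ./fast-tokenizers \
#       --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased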
'''simple docstring'''
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
def __init__( self : Union[str, Any] ,_UpperCAmelCase : Dict ,_UpperCAmelCase : Tuple=2 ,_UpperCAmelCase : Optional[int]=32 ,_UpperCAmelCase : Any=16 ,_UpperCAmelCase : Tuple=3 ,_UpperCAmelCase : Any=True ,_UpperCAmelCase : List[Any]=True ,_UpperCAmelCase : Union[str, Any]=32 ,_UpperCAmelCase : Optional[int]=4 ,_UpperCAmelCase : str=[0, 1, 2, 3] ,_UpperCAmelCase : Optional[Any]=4 ,_UpperCAmelCase : int=37 ,_UpperCAmelCase : int="gelu" ,_UpperCAmelCase : Tuple=0.1 ,_UpperCAmelCase : Dict=0.1 ,_UpperCAmelCase : List[Any]=0.02 ,_UpperCAmelCase : str=3 ,_UpperCAmelCase : Dict=[1, 384, 24, 24] ,_UpperCAmelCase : str=True ,_UpperCAmelCase : Any=None ,):
_a : Optional[Any] = parent
_a : Any = batch_size
_a : str = image_size
_a : Any = patch_size
_a : Dict = num_channels
_a : int = is_training
_a : str = use_labels
_a : List[Any] = hidden_size
_a : Dict = num_hidden_layers
_a : int = backbone_out_indices
_a : Any = num_attention_heads
_a : Dict = intermediate_size
_a : Dict = hidden_act
_a : Optional[int] = hidden_dropout_prob
_a : Dict = attention_probs_dropout_prob
_a : Union[str, Any] = initializer_range
_a : Tuple = num_labels
_a : Any = backbone_featmap_shape
_a : Any = scope
_a : Dict = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_a : List[str] = (image_size // patch_size) ** 2
_a : Tuple = num_patches + 1
def __lowercase ( self : Optional[Any] ):
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Dict = None
if self.use_labels:
_a : Optional[int] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
_a : Any = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Optional[int] ):
_a : Union[str, Any] = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [96, 192, 384, 768],
'num_groups': 2,
}
return DPTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,backbone_out_indices=self.backbone_out_indices ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,is_hybrid=self.is_hybrid ,backbone_config=_UpperCAmelCase ,backbone_featmap_shape=self.backbone_featmap_shape ,)
def __lowercase ( self : Any ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Optional[int] ):
_a : Tuple = DPTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Optional[int] = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : str ,_UpperCAmelCase : Dict ,_UpperCAmelCase : List[str] ,_UpperCAmelCase : Union[str, Any] ):
_a : Dict = self.num_labels
_a : List[Any] = DPTForDepthEstimation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Union[str, Any] = model(_UpperCAmelCase )
self.parent.assertEqual(result.predicted_depth.shape ,(self.batch_size, self.image_size, self.image_size) )
def __lowercase ( self : str ,_UpperCAmelCase : str ,_UpperCAmelCase : str ,_UpperCAmelCase : str ):
_a : Union[str, Any] = self.num_labels
_a : Union[str, Any] = DPTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Tuple = model(_UpperCAmelCase ,labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __lowercase ( self : Dict ):
_a : Dict = self.prepare_config_and_inputs()
_a , _a , _a : Any = config_and_inputs
_a : Optional[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCAmelCase : List[Any] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCAmelCase : Tuple = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase : Dict = False
lowerCAmelCase : Any = False
lowerCAmelCase : Optional[Any] = False
def __lowercase ( self : Optional[int] ):
_a : Union[str, Any] = DPTModelTester(self )
_a : List[Any] = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 )
def __lowercase ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason='DPT does not use inputs_embeds' )
def __lowercase ( self : List[Any] ):
pass
def __lowercase ( self : str ):
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
_a : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase ,nn.Linear ) )
def __lowercase ( self : Dict ):
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = model_class(_UpperCAmelCase )
_a : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : str = [*signature.parameters.keys()]
_a : Optional[Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
def __lowercase ( self : Any ):
_a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : List[str] ):
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_UpperCAmelCase )
def __lowercase ( self : Optional[int] ):
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
def __lowercase ( self : str ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_a : Optional[Any] = True
if model_class in get_values(_UpperCAmelCase ):
continue
_a : List[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_a : int = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : str = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : Optional[Any] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_a , _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
_a : Optional[int] = False
_a : Optional[int] = True
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
_a : Any = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
_a : Optional[Any] = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : str = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : Optional[Any] ):
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : List[str] = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
_a : Optional[Any] = model_class(config=_UpperCAmelCase )
# Skip the check for the backbone
_a : Optional[Any] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_a : int = [F"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() ,[0.0, 1.0] ,msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" ,)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __lowercase ( self : Tuple ):
pass
@slow
def __lowercase ( self : Tuple ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_a : int = DPTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowercase ( self : str ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : int = 'add'
with self.assertRaises(_UpperCAmelCase ):
_a : Dict = DPTForDepthEstimation(_UpperCAmelCase )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
def __lowercase ( self : str ):
_a : Optional[int] = DPTImageProcessor.from_pretrained('Intel/dpt-hybrid-midas' )
_a : List[Any] = DPTForDepthEstimation.from_pretrained('Intel/dpt-hybrid-midas' ).to(_UpperCAmelCase )
_a : Optional[int] = prepare_img()
_a : str = image_processor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_a : int = model(**_UpperCAmelCase )
_a : Union[str, Any] = outputs.predicted_depth
# verify the predicted depth
_a : List[str] = torch.Size((1, 384, 384) )
self.assertEqual(predicted_depth.shape ,_UpperCAmelCase )
_a : Optional[Any] = torch.tensor(
[[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100 ,_UpperCAmelCase ,atol=1E-4 ) )
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
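# Quick check of the sieve above:
#   calculate_prime_numbers(20) == [2, 3, 5, 7, 11, 13, 17, 19]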
def solution(base: int = 800800, degree: int = 800800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
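# The two-pointer search above counts prime pairs (p, q) with p < q and
#     q * log2(p) + p * log2(q) <= degree * log2(base),
# which is equivalent to p**q * q**p <= base**degree (Project Euler 800,
# "hybrid integers").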
if __name__ == "__main__":
print(F'''{solution() = }''')
import argparse
import logging
import sys
from unittest.mock import patch
import run_glue_deebert
from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow
logging.basicConfig(level=logging.DEBUG)
__A = logging.getLogger()
def get_setup_file():
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('-f')
    args = parser.parse_args()
    return args.f
class DeeBertTests(TestCasePlus):
    def setup(self) -> None:
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

    def run_and_check(self, args):
        n_gpu = get_gpu_count()
        if n_gpu > 1:
            pass
            # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560
            # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py"
            # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split()
            # cmd = [sys.executable] + distributed_args + args
            # execute_subprocess_async(cmd, env=self.get_env())
            # XXX: test the results - need to save them first into .json file
        else:
            args.insert(0, 'run_glue_deebert.py')
            with patch.object(sys, 'argv', args):
                result = run_glue_deebert.main()
                for value in result.values():
                    self.assertGreaterEqual(value, 0.6_66)
@slow
@require_torch_non_multi_gpu
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
__lowerCamelCase = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split()
self.run_and_check(lowerCamelCase__ )
__lowerCamelCase = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase__ )
__lowerCamelCase = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split()
self.run_and_check(lowerCamelCase__ )
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_VOCAB)
        vocab = ['<s>', '<pad>', '</s>', '<unk>']
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_VOCAB, save_dir / VOCAB_FILES_NAMES['spm_file'])
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 10_01)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 10_01)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [2_89, 50, 14, 1_74, 3_86],
        )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'],
        )

    @slow
    def test_tokenizer_integration(self):
        """simple docstring"""
__lowercase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id['pt'], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id['ru'], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id['it'], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id['de'], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 1_00_00)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 16_01, 47, 76_47, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
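# Sketch of the multilingual prefixing behaviour exercised above (requires
# network access to the test checkpoint):
#
#   tok = Speech2TextTokenizer.from_pretrained("valhalla/s2t_mustc_multilinguial_medium")
#   tok.tgt_lang = "fr"            # every encoding is prefixed with the fr code
#   ids = tok("C'est trop cool").input_ids
#   assert ids[0] == tok.lang_code_to_id["fr"] and ids[-1] == tok.eos_token_id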
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D
class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
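# Shape sketch for the cross-attention down block above (NHWC layout, the
# Flax diffusers convention): an input of (batch, h, w, in_channels) leaves
# every resnet/attention pair as (batch, h, w, out_channels); when
# `add_downsample` is set, the trailing FlaxDownsample2D halves h and w.
# Each intermediate tensor is also collected in `output_states` to feed the
# U-Net skip connections.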
class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)
        return hidden_states, output_states
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = []
SCREAMING_SNAKE_CASE_ : Optional[int] = []
for i in range(self.num_layers):
SCREAMING_SNAKE_CASE_ : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
SCREAMING_SNAKE_CASE_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
SCREAMING_SNAKE_CASE_ : Any = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_)
SCREAMING_SNAKE_CASE_ : Any = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[int] = resnets
SCREAMING_SNAKE_CASE_ : Tuple = attentions
if self.add_upsample:
SCREAMING_SNAKE_CASE_ : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype)
def __call__( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Tuple=True):
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions):
# pop res hidden states
SCREAMING_SNAKE_CASE_ : List[Any] = res_hidden_states_tuple[-1]
SCREAMING_SNAKE_CASE_ : Optional[Any] = res_hidden_states_tuple[:-1]
SCREAMING_SNAKE_CASE_ : str = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1)
SCREAMING_SNAKE_CASE_ : int = resnet(lowercase_ , lowercase_ , deterministic=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = attn(lowercase_ , lowercase_ , deterministic=lowercase_)
if self.add_upsample:
SCREAMING_SNAKE_CASE_ : int = self.upsamplers_a(lowercase_)
return hidden_states
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = []
for i in range(self.num_layers):
SCREAMING_SNAKE_CASE_ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
SCREAMING_SNAKE_CASE_ : List[Any] = self.prev_output_channel if i == 0 else self.out_channels
SCREAMING_SNAKE_CASE_ : List[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = resnets
if self.add_upsample:
SCREAMING_SNAKE_CASE_ : List[str] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype)
def __call__( self : List[Any] , lowercase_ : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple=True):
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
SCREAMING_SNAKE_CASE_ : Optional[Any] = res_hidden_states_tuple[-1]
SCREAMING_SNAKE_CASE_ : Dict = res_hidden_states_tuple[:-1]
SCREAMING_SNAKE_CASE_ : Dict = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1)
SCREAMING_SNAKE_CASE_ : Optional[int] = resnet(lowercase_ , lowercase_ , deterministic=lowercase_)
if self.add_upsample:
SCREAMING_SNAKE_CASE_ : Any = self.upsamplers_a(lowercase_)
return hidden_states
class lowerCAmelCase__ ( nn.Module ):
'''simple docstring'''
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
SCREAMING_SNAKE_CASE_ : str = []
for _ in range(self.num_layers):
SCREAMING_SNAKE_CASE_ : Any = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(lowercase_)
SCREAMING_SNAKE_CASE_ : Dict = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(lowercase_)
SCREAMING_SNAKE_CASE_ : str = resnets
SCREAMING_SNAKE_CASE_ : Tuple = attentions
def __call__( self : Any , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : Tuple , lowercase_ : Optional[Any]=True):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.resnets[0](lowercase_ , lowercase_)
for attn, resnet in zip(self.attentions , self.resnets[1:]):
SCREAMING_SNAKE_CASE_ : Optional[Any] = attn(lowercase_ , lowercase_ , deterministic=lowercase_)
SCREAMING_SNAKE_CASE_ : Any = resnet(lowercase_ , lowercase_ , deterministic=lowercase_)
return hidden_states
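# --- Hedged usage sketch (editorial addition; assumes `jax` and `flax` are available
# and reuses the `nn`/`jnp` imports at the top of this file). The blocks above follow
# the standard flax.linen recipe: sub-modules are created in `setup()`, and down
# blocks return both the final hidden states and a tuple of per-layer states that the
# up blocks later consume as skip connections. A minimal block with the same
# (hidden_states, output_states) contract:
import jax


class TinyDownBlock(nn.Module):
    out_channels: int
    num_layers: int = 2

    def setup(self):
        self.convs = [
            nn.Conv(self.out_channels, kernel_size=(3, 3), padding="SAME")
            for _ in range(self.num_layers)
        ]

    def __call__(self, hidden_states):
        output_states = ()
        for conv in self.convs:
            hidden_states = nn.silu(conv(hidden_states))
            output_states += (hidden_states,)  # collected for the up blocks' skips
        return hidden_states, output_states


if __name__ == "__main__":
    block = TinyDownBlock(out_channels=8)
    x = jnp.ones((1, 16, 16, 4))  # NHWC layout, matching the channel-last concatenate above
    params = block.init(jax.random.PRNGKey(0), x)
    y, skips = block.apply(params, x)
    assert y.shape == (1, 16, 16, 8) and len(skips) == 2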
| 91 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=5_02_65 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[int]=30_72 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[int]=1e-5 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Dict=10_24 , _UpperCAmelCase : int=1_28 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : List[Any]=1_28 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : List[Any]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[int]=2_24 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = max_ad_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
__lowercase = rel_ad_pos_bins
__lowercase = max_rel_ad_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
class A__ ( lowerCAmelCase__ ):
    torch_onnx_minimum_version = version.parse("1.12")
@property
def a__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def a__ ( self : int ) -> float:
"""simple docstring"""
return 1e-5
@property
def a__ ( self : str ) -> int:
"""simple docstring"""
return 12
def a__ ( self : str , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
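# --- Hedged illustration (editorial addition) ---
# `generate_dummy_inputs` above builds batch_size x seq_length dummy words plus one
# dummy bounding box per sample before handing everything to the processor. The
# shape logic in isolation:
if __name__ == "__main__":
    def dummy_layoutlm_inputs(batch_size: int = 2, seq_length: int = 8, unk_token: str = "[UNK]"):
        dummy_text = [[" ".join([unk_token]) * seq_length]] * batch_size
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size  # one (x0, y0, x1, y1) box per sample
        return dummy_text, dummy_bboxes

    texts, boxes = dummy_layoutlm_inputs()
    assert len(texts) == len(boxes) == 2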
| 325 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Find a root of `function` near `starting_point` with the Newton-Raphson method."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}''')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
f'''{newton_raphson("log(y) - 1", 2, variable="y")}''',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
f'''{newton_raphson("exp(x) - 1", 10, precision=0.005)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}''')
| 92 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE__ = """RegNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = [1, 1088, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A__ ( nn.Module ):
def __init__( self : str , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[str] = "relu" , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__lowercase = nn.Convad(
_UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , groups=_UpperCAmelCase , bias=_UpperCAmelCase , )
__lowercase = nn.BatchNormad(_UpperCAmelCase )
__lowercase = ACTaFN[activation] if activation is not None else nn.Identity()
def a__ ( self : Tuple , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.convolution(_UpperCAmelCase )
__lowercase = self.normalization(_UpperCAmelCase )
__lowercase = self.activation(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig ) -> Any:
"""simple docstring"""
super().__init__()
__lowercase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
__lowercase = config.num_channels
def a__ ( self : Optional[Any] , _UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
__lowercase = self.embedder(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__lowercase = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , stride=_UpperCAmelCase , bias=_UpperCAmelCase )
__lowercase = nn.BatchNormad(_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : Tensor ) -> Tensor:
"""simple docstring"""
__lowercase = self.convolution(_UpperCAmelCase )
__lowercase = self.normalization(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
"""simple docstring"""
super().__init__()
__lowercase = nn.AdaptiveAvgPoolad((1, 1) )
__lowercase = nn.Sequential(
nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.Sigmoid() , )
def a__ ( self : str , _UpperCAmelCase : Dict ) -> str:
"""simple docstring"""
__lowercase = self.pooler(_UpperCAmelCase )
__lowercase = self.attention(_UpperCAmelCase )
__lowercase = hidden_state * attention
return hidden_state
class A__ ( nn.Module ):
def __init__( self : Optional[int] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Tuple:
"""simple docstring"""
super().__init__()
__lowercase = in_channels != out_channels or stride != 1
__lowercase = max(1 , out_channels // config.groups_width )
__lowercase = (
RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
__lowercase = nn.Sequential(
RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , )
__lowercase = ACTaFN[config.hidden_act]
def a__ ( self : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = hidden_state
__lowercase = self.layer(_UpperCAmelCase )
__lowercase = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__lowercase = self.activation(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__lowercase = in_channels != out_channels or stride != 1
__lowercase = max(1 , out_channels // config.groups_width )
__lowercase = (
RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
__lowercase = nn.Sequential(
RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , )
__lowercase = ACTaFN[config.hidden_act]
def a__ ( self : Tuple , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = hidden_state
__lowercase = self.layer(_UpperCAmelCase )
__lowercase = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__lowercase = self.activation(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : List[Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 2 , ) -> Dict:
"""simple docstring"""
super().__init__()
__lowercase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
__lowercase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , ) , *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for _ in range(depth - 1 )] , )
def a__ ( self : Any , _UpperCAmelCase : str ) -> int:
"""simple docstring"""
__lowercase = self.layers(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : Any , _UpperCAmelCase : RegNetConfig ) -> int:
"""simple docstring"""
super().__init__()
__lowercase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_UpperCAmelCase , config.depths[1:] ):
self.stages.append(RegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase ) )
def a__ ( self : int , _UpperCAmelCase : Tensor , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
__lowercase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowercase = hidden_states + (hidden_state,)
__lowercase = stage_module(_UpperCAmelCase )
if output_hidden_states:
__lowercase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
def a__ ( self : Any , _UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
if isinstance(_UpperCAmelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def a__ ( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = value
SCREAMING_SNAKE_CASE__ = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE__ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , lowerCAmelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[Any] , _UpperCAmelCase : Any ) -> str:
"""simple docstring"""
super().__init__(_UpperCAmelCase )
__lowercase = config
__lowercase = RegNetEmbeddings(_UpperCAmelCase )
__lowercase = RegNetEncoder(_UpperCAmelCase )
__lowercase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a__ ( self : Tuple , _UpperCAmelCase : Tensor , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
__lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase = self.embedder(_UpperCAmelCase )
__lowercase = self.encoder(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
__lowercase = encoder_outputs[0]
__lowercase = self.pooler(_UpperCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class A__ ( lowerCAmelCase__ ):
def __init__( self : str , _UpperCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_UpperCAmelCase )
__lowercase = config.num_labels
__lowercase = RegNetModel(_UpperCAmelCase )
# classification head
__lowercase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a__ ( self : List[Any] , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[torch.LongTensor] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase = self.regnet(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
__lowercase = outputs.pooler_output if return_dict else outputs[1]
__lowercase = self.classifier(_UpperCAmelCase )
__lowercase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowercase = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowercase = 'single_label_classification'
else:
__lowercase = 'multi_label_classification'
if self.config.problem_type == "regression":
__lowercase = MSELoss()
if self.num_labels == 1:
__lowercase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
__lowercase = CrossEntropyLoss()
__lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowercase = BCEWithLogitsLoss()
__lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
if not return_dict:
__lowercase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
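# --- Hedged usage sketch (editorial addition; assumes `transformers`, `PIL` and
# network access for the pretrained checkpoint). Typical inference with the
# image-classification head defined above, via the library's public classes:
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import AutoImageProcessor, RegNetForImageClassification

    image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
    image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
    model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
    inputs = image_processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(model.config.id2label[int(logits.argmax(-1))])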
| 325 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coins(root: TreeNode | None) -> int:
    """Return the minimum number of moves so that every node holds exactly one coin."""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        distrib_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(distrib_moves, distrib_excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
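    # --- Hedged usage sketch (editorial addition) ---
    # Classic 3-node case: the root holds 3 coins and both children hold 0, so one
    # coin must be moved to each child, i.e. 2 moves in total.
    example_root = TreeNode(3, TreeNode(0), TreeNode(0))
    assert distribute_coins(example_root) == 2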
| 93 |
from __future__ import annotations
def minimum_cost_path(matrix: list[list[int]]) -> int:
    """Find the minimum-cost path from the top-left to the bottom-right corner,
    moving only right or down. The input matrix is modified in place."""
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
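    # --- Hedged usage sketch (editorial addition) ---
    # The cheapest monotone path below is 1 -> 3 -> 1 -> 1 -> 1, total cost 7.
    # Note that `minimum_cost_path` mutates its argument in place.
    assert minimum_cost_path([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7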
| 325 | 0 |
import numpy as np
from numpy import ndarray
from scipy.optimize import Bounds, LinearConstraint, minimize
def norm_squared(vector: ndarray) -> float:
    """Return the squared second norm of vector."""
    return np.dot(vector, vector)


class SVC:
    """Support Vector Classifier."""

    def __init__(
        self,
        *,
        regularization: float = np.inf,
        kernel: str = "linear",
        gamma: float = 0.0,
    ) -> None:
        self.regularization = regularization
        self.gamma = gamma
        if kernel == "linear":
            self.kernel = self.__linear
        elif kernel == "rbf":
            if self.gamma == 0:
                raise ValueError("rbf kernel requires gamma")
            if not isinstance(self.gamma, (float, int)):
                raise ValueError("gamma must be float or int")
            if not self.gamma > 0:
                raise ValueError("gamma must be > 0")
            self.kernel = self.__rbf
            # in the future, there could be a default value like in sklearn
            # sklear: def_gamma = 1/(n_features * X.var()) (wiki)
            # previously it was 1/(n_features)
        else:
            raise ValueError(f"Unknown kernel: {kernel}")

    # kernels
    def __linear(self, vector1: ndarray, vector2: ndarray) -> float:
        """Linear kernel (as if no kernel used at all)."""
        return np.dot(vector1, vector2)

    def __rbf(self, vector1: ndarray, vector2: ndarray) -> float:
        """RBF: Radial Basis Function Kernel."""
        return np.exp(-(self.gamma * norm_squared(vector1 - vector2)))

    def fit(self, observations: list, classes: ndarray) -> None:
        """Fit the classifier on a list of observations and their +/-1 classes."""
        self.observations = observations
        self.classes = classes

        # using Wolfe's Dual to calculate w.
        # Primal problem: minimize 1/2*norm_squared(w)
        #   constraint: yn(w . xn + b) >= 1
        #
        # With l a vector
        # Dual problem: maximize sum_n(ln) -
        #       1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm))
        #   constraint: self.C >= ln >= 0
        #           and sum_n(ln*yn) = 0
        # Then we get w using w = sum_n(ln*yn*xn)
        # At the end we can get b ~= mean(yn - w . xn)
        #
        # Since we use kernels, we only need l_star to calculate b
        # and to classify observations
        (n,) = np.shape(classes)

        def to_minimize(candidate: ndarray) -> float:
            """Opposite of the function to maximize."""
            s = 0
            (n,) = np.shape(candidate)
            for i in range(n):
                for j in range(n):
                    s += (
                        candidate[i]
                        * candidate[j]
                        * classes[i]
                        * classes[j]
                        * self.kernel(observations[i], observations[j])
                    )
            return 1 / 2 * s - sum(candidate)

        ly_contraint = LinearConstraint(classes, 0, 0)
        l_bounds = Bounds(0, self.regularization)

        l_star = minimize(
            to_minimize, np.ones(n), bounds=l_bounds, constraints=[ly_contraint]
        ).x
        self.optimum = l_star

        # calculating mean offset of separation plane to points
        s = 0
        for i in range(n):
            for j in range(n):
                s += classes[i] - classes[i] * self.optimum[i] * self.kernel(
                    observations[i], observations[j]
                )
        self.offset = s / n

    def predict(self, observation: ndarray) -> int:
        """Get the expected class of an observation."""
        s = sum(
            self.optimum[n]
            * self.classes[n]
            * self.kernel(self.observations[n], observation)
            for n in range(len(self.classes))
        )
        return 1 if s + self.offset >= 0 else -1
if __name__ == "__main__":
import doctest
doctest.testmod()
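    # --- Hedged usage sketch (editorial addition) ---
    # Fitting the linear SVC on two trivially separable 1-D points; the learned
    # separating plane is x = 0, so the predicted signs follow directly.
    xs = [np.asarray([-1.0]), np.asarray([1.0])]
    ys = np.asarray([-1, 1])
    svc = SVC(kernel="linear")
    svc.fit(xs, ys)
    assert svc.predict(np.asarray([2.0])) == 1
    assert svc.predict(np.asarray([-2.0])) == -1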
| 94 |
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    """`Enum` that specifies which verification checks to run."""

    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum doesn't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file are missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The split sizes don't match the expected split sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: Optional[int]) -> bool:
    """Check if `dataset_size` is smaller than `config.IN_MEMORY_MAX_SIZE`."""
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
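# --- Hedged usage sketch (editorial addition; this module normally lives inside the
# `datasets` package, so the relative imports above keep it from running stand-alone).
# `verify_checksums` compares an expected and a recorded {url: {"num_bytes", "checksum"}}
# mapping and raises on any mismatch:
#
#     expected = {"https://example.org/data.txt": {"num_bytes": 3, "checksum": "abc"}}
#     recorded = {"https://example.org/data.txt": {"num_bytes": 3, "checksum": "def"}}
#     try:
#         verify_checksums(expected, recorded)
#     except NonMatchingChecksumError as err:
#         print("caught:", err)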
| 325 | 0 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class __lowerCAmelCase :
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=4 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.0 , lowerCAmelCase__=0.1 , lowerCAmelCase__=True , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ) -> Any:
'''simple docstring'''
a__ : str =parent
a__ : Dict =batch_size
a__ : List[str] =seq_length
a__ : Any =is_training
a__ : Tuple =use_input_mask
a__ : List[str] =use_token_type_ids
a__ : Union[str, Any] =use_labels
a__ : Optional[int] =vocab_size
a__ : int =hidden_size
a__ : int =num_hidden_layers
a__ : List[Any] =num_attention_heads
a__ : str =intermediate_multiple_size
a__ : List[str] =hidden_act
a__ : Optional[int] =hidden_dropout
a__ : List[str] =attention_dropout
a__ : int =weight_tying
a__ : Optional[Any] =max_position_embeddings
a__ : Any =type_vocab_size
a__ : Optional[int] =type_sequence_label_size
a__ : Optional[Any] =initializer_range
a__ : Dict =num_labels
a__ : List[str] =num_choices
a__ : Union[str, Any] =scope
def _lowercase ( self ) -> Dict:
'''simple docstring'''
a__ : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a__ : Optional[int] =None
if self.use_input_mask:
a__ : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
a__ : Dict =None
if self.use_labels:
a__ : str =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a__ : Any =self.get_config()
return config, input_ids, input_mask, token_labels
def _lowercase ( self ) -> Dict:
'''simple docstring'''
return GPTNeoXJapaneseConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def _lowercase ( self ) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ , a__ , a__ : Tuple =self.prepare_config_and_inputs()
a__ : List[str] =True
return config, input_ids, input_mask, token_labels
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
'''simple docstring'''
a__ : Any =GPTNeoXJapaneseModel(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : List[Any] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
a__ : Union[str, Any] =model(lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
a__ : Optional[int] =True
a__ : Dict =GPTNeoXJapaneseModel(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Optional[int] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
a__ : Optional[int] =GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
a__ : Optional[Any] =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]:
'''simple docstring'''
a__ : int =True
a__ : str =GPTNeoXJapaneseForCausalLM(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
# first forward pass
a__ : Any =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , use_cache=lowerCAmelCase__ )
a__ : List[str] =outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
a__ : Tuple =ids_tensor((self.batch_size, 3) , config.vocab_size )
a__ : List[Any] =ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and
a__ : List[str] =torch.cat([input_ids, next_tokens] , dim=-1 )
a__ : List[str] =torch.cat([input_mask, next_mask] , dim=-1 )
a__ : int =model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ )
a__ : Dict =output_from_no_past["hidden_states"][0]
a__ : Any =model(
lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , output_hidden_states=lowerCAmelCase__ , )["hidden_states"][0]
# select random slice
a__ : List[str] =ids_tensor((1,) , output_from_past.shape[-1] ).item()
a__ : List[Any] =output_from_no_past[:, -3:, random_slice_idx].detach()
a__ : Any =output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ : List[Any] =self.prepare_config_and_inputs()
a__ , a__ , a__ , a__ : int =config_and_inputs
a__ : Optional[Any] ={"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase):
_lowercase : Optional[int] = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
_lowercase : List[str] = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
_lowercase : Optional[int] = (
{"""feature-extraction""": GPTNeoXJapaneseModel, """text-generation""": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
_lowercase : int = False
_lowercase : Optional[Any] = False
_lowercase : Tuple = False
_lowercase : int = False
def _lowercase ( self ) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] =GPTNeoXJapaneseModelTester(self )
a__ : Union[str, Any] =ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=3_7 )
def _lowercase ( self ) -> Optional[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ , a__ , a__ , a__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> Any:
'''simple docstring'''
a__ , a__ , a__ , a__ : List[str] =self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> List[str]:
'''simple docstring'''
a__ , a__ , a__ , a__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_decoder()
a__ : str =None
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> Optional[int]:
'''simple docstring'''
a__ , a__ , a__ , a__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def _lowercase ( self ) -> List[Any]:
'''simple docstring'''
a__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase__ )
@slow
def _lowercase ( self ) -> int:
'''simple docstring'''
a__ : Tuple ="abeja/gpt-neox-japanese-2.7b"
a__ : Any =["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
a__ : Dict =[
"データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
"100年後に必要とされる会社は、「人」が中心の会社です。",
"フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
"国境の長いトンネルを抜けると、そこは雪国だった。",
"美味しい日本食といえば、やっぱりお寿司ですよね。",
]
a__ : int =GPTNeoXJapaneseTokenizer.from_pretrained(lowerCAmelCase__ )
a__ : Dict =GPTNeoXJapaneseForCausalLM.from_pretrained(lowerCAmelCase__ )
a__ : List[str] =[]
for prompt in prompts:
a__ : List[str] =tokenizer(lowerCAmelCase__ , return_tensors="pt" ).input_ids
a__ : int =model.generate(lowerCAmelCase__ , max_length=5_0 )
a__ : Dict =tokenizer.batch_decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
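# --- Hedged illustration (editorial addition; a plain-torch model of the test
# helpers used above). `ids_tensor` draws random token ids bounded by the vocab
# size and `random_attention_mask` draws a 0/1 mask that always attends to at
# least one position; roughly:
if __name__ == "__main__":
    batch_size, seq_length, vocab_size = 13, 7, 99
    input_ids = torch.randint(0, vocab_size, (batch_size, seq_length))
    attention_mask = (torch.rand(batch_size, seq_length) > 0.5).long()
    attention_mask[:, -1] = 1  # guarantee at least one attended token, as the helper does
    assert input_ids.shape == attention_mask.shape == (batch_size, seq_length)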
| 95 |
import math
def is_prime(number: int) -> bool:
    """Check if a number is prime in O(sqrt(n)) time."""
    assert isinstance(number, int) and number >= 0, "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Walk from `factor * value` to the nearest prime (upwards by default,
    downwards with `desc=True`); if the starting value is itself prime, return
    the next prime instead."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
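# --- Hedged usage sketch (editorial addition) ---
if __name__ == "__main__":
    assert is_prime(13) and not is_prime(21)
    # `next_prime` scales by `factor` first, then walks upwards to the nearest
    # prime: 14 -> 15 -> 16 -> 17.
    assert next_prime(14) == 17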
| 325 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def A_ ( self , lowercase , lowercase , lowercase ):
_lowerCamelCase : Optional[int] = TextaTextGenerationPipeline(model=lowercase , tokenizer=lowercase )
return generator, ["Something to write", "Something else"]
def A_ ( self , lowercase , lowercase ):
_lowerCamelCase : int = generator('Something there' )
self.assertEqual(lowercase , [{'generated_text': ANY(lowercase )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_lowerCamelCase : Optional[Any] = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=lowercase )
self.assertEqual(
lowercase , [
[{'generated_text': ANY(lowercase )}, {'generated_text': ANY(lowercase )}],
[{'generated_text': ANY(lowercase )}, {'generated_text': ANY(lowercase )}],
] , )
_lowerCamelCase : Union[str, Any] = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=lowercase )
self.assertEqual(
lowercase , [
[{'generated_text': ANY(lowercase )}, {'generated_text': ANY(lowercase )}],
[{'generated_text': ANY(lowercase )}, {'generated_text': ANY(lowercase )}],
] , )
with self.assertRaises(lowercase ):
generator(4 )
@require_torch
def A_ ( self ):
_lowerCamelCase : int = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
_lowerCamelCase : Dict = generator('Something there' , do_sample=lowercase )
self.assertEqual(lowercase , [{'generated_text': ''}] )
_lowerCamelCase : str = 3
_lowerCamelCase : str = generator(
'Something there' , num_return_sequences=lowercase , num_beams=lowercase , )
_lowerCamelCase : int = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(lowercase , lowercase )
_lowerCamelCase : int = generator('This is a test' , do_sample=lowercase , num_return_sequences=2 , return_tensors=lowercase )
self.assertEqual(
lowercase , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
_lowerCamelCase : Optional[int] = generator.model.config.eos_token_id
_lowerCamelCase : Union[str, Any] = '<pad>'
_lowerCamelCase : List[str] = generator(
['This is a test', 'This is a second test'] , do_sample=lowercase , num_return_sequences=2 , batch_size=2 , return_tensors=lowercase , )
self.assertEqual(
lowercase , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def A_ ( self ):
_lowerCamelCase : List[str] = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
_lowerCamelCase : List[str] = generator('Something there' , do_sample=lowercase )
self.assertEqual(lowercase , [{'generated_text': ''}] ) | 96 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' )  # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [torch.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(_UpperCAmelCase ):
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [tf.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest( unittest.TestCase ):
    def setUp( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )
        processor.save_pretrained(self.tmpdirname )
    def get_image_processor( self , **kwargs ):
        """simple docstring"""
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        """simple docstring"""
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_masks = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks )]
        pt_dummy_masks = [torch.tensor(dummy_masks )]
        original_sizes = [[17_64, 26_46]]
        reshaped_input_size = [[6_83, 10_24]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks , original_sizes , reshaped_input_size , return_tensors='tf' )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks , original_sizes , reshaped_input_size , return_tensors='pt' )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
    @is_pt_tf_cross_test
    def test_image_processor_equivalence( self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input , return_tensors='pt' )['pixel_values'].numpy()
        pt_input_processor = processor(images=image_input , return_tensors='pt' )['pixel_values'].numpy()
        tf_input_feat_extract = image_processor(image_input , return_tensors='tf' )['pixel_values'].numpy()
        tf_input_processor = processor(images=image_input , return_tensors='tf' )['pixel_values'].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract , pt_input_processor ) )
        self.assertTrue(np.allclose(pt_input_feat_extract , tf_input_feat_extract ) )
        self.assertTrue(np.allclose(tf_input_feat_extract , tf_input_processor ) )
| 325 | 0 |
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset( Dataset ):
    """simple docstring"""
    def __init__( self , path="" , prefix="train" ):
        '''simple docstring'''
        assert os.path.isdir(path )
        self.documents = []
        story_filenames_list = os.listdir(path )
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path , story_filename )
            if not os.path.isfile(path_to_story ):
                continue
            self.documents.append(path_to_story )
    def __len__( self ):
        '''simple docstring'''
        return len(self.documents )
    def __getitem__( self , idx ):
        '''simple docstring'''
        document_path = self.documents[idx]
        document_name = document_path.split('''/''' )[-1]
        with open(document_path , encoding='''utf-8''' ) as source:
            raw_story = source.read()
            story_lines , summary_lines = process_story(raw_story )
        return document_name, story_lines, summary_lines
def process_story( raw_story ):
    '''simple docstring'''
    nonempty_lines = list(filter(lambda x : len(x ) != 0 , [line.strip() for line in raw_story.split('''\n''' )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith('''@highlight''' ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t : not t.startswith('''@highlight''' ) , lines ) )
    return story_lines, summary_lines
def _add_missing_period( line ):
    '''simple docstring'''
    END_TOKENS = ['''.''', '''!''', '''?''', '''...''', '''\'''', '''`''', '''"''', '''\u2019''', '''\u201d''', ''')''']
    if line.startswith('''@highlight''' ):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size( sequence , block_size , pad_token_id ):
    '''simple docstring'''
    if len(sequence ) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence )) )
        return sequence
def build_mask( sequence , pad_token_id ):
    '''simple docstring'''
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
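# Illustrative sketch (token ids below are arbitrary placeholders, not from the
# original module): how fit_to_block_size and build_mask compose on a short sequence.
def _demo_fit_and_mask():
    padded = fit_to_block_size([101, 7592, 102] , 8 , 0 )
    # padded == [101, 7592, 102, 0, 0, 0, 0, 0]
    mask = build_mask(torch.tensor(padded ) , 0 )
    # mask == tensor([1, 1, 1, 0, 0, 0, 0, 0]) -- zeros over the padding positions
    return padded, mask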
def encode_for_summarization( story_lines , summary_lines , tokenizer ):
    '''simple docstring'''
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids( batch , separator_token_id ):
    '''simple docstring'''
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
| 97 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_bartpho"""] = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
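    # registering a _LazyModule under this module's name defers the submodule imports
    # (here the sentencepiece-backed tokenizer) until an attribute is first accessed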
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 325 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ctrl'] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_ctrl'] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 98 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = "transfo-xl"
lowerCAmelCase__ : int = ["mems"]
lowerCAmelCase__ : Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , vocab_size=26_77_35 , cutoffs=[2_00_00, 4_00_00, 20_00_00] , d_model=10_24 , d_embed=10_24 , n_head=16 , d_head=64 , d_inner=40_96 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=16_00 , clamp_len=10_00 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1e-5 , eos_token_id=0 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self ) -> int:
"""simple docstring"""
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self , value ):
"""simple docstring"""
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 325 | 0 |
from string import ascii_lowercase, ascii_uppercase
def capitalize( sentence : str ) -> str:
    if not sentence:
        return ""
    lower_to_upper = dict(zip(ascii_lowercase , ascii_uppercase ) )
    return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
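    # illustrative usage of the function above (the example strings are made up)
    print(capitalize('''hello world''' ))  # -> Hello world
    print(capitalize('''123 main st''' ))  # non-letters are left unchanged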
| 99 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
SCREAMING_SNAKE_CASE__ = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int ) -> Union[str, Any]:
for attribute in key.split('.' ):
__lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if weight_type is not None:
__lowercase = getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ).shape
else:
__lowercase = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
__lowercase = value
elif weight_type == "weight_g":
__lowercase = value
elif weight_type == "weight_v":
__lowercase = value
elif weight_type == "bias":
__lowercase = value
else:
__lowercase = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> Tuple:
__lowercase = []
__lowercase = fairseq_model.state_dict()
__lowercase = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
__lowercase = None
for name, value in fairseq_dict.items():
__lowercase = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
__lowercase = True
elif name.split('.' )[0] == "proj":
__lowercase = fairseq_model.proj
__lowercase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowercase = True
if "*" in mapped_key:
__lowercase = name.split(SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
__lowercase = mapped_key.replace('*' , SCREAMING_SNAKE_CASE )
if "weight_g" in name:
__lowercase = 'weight_g'
elif "weight_v" in name:
__lowercase = 'weight_v'
elif "bias" in name:
__lowercase = 'bias'
elif "weight" in name:
__lowercase = 'weight'
else:
__lowercase = None
set_recursively(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE )
logger.warning(F"""Unused weights: {unused_weights}""" )
return proj_weight
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] ) -> Optional[Any]:
__lowercase = full_name.split('conv_layers.' )[-1]
__lowercase = name.split('.' )
__lowercase = int(items[0] )
__lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
__lowercase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
__lowercase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
__lowercase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
__lowercase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(SCREAMING_SNAKE_CASE )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict( dict_path ):
    with open(dict_path , 'r' , encoding='utf-8' ) as f:
        lines = f.readlines()
        words = [line.split(' ' )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
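# e.g. a fairseq dict file whose first lines begin with "hello" and "world" maps to
# {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}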
@torch.no_grad()
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] , ) -> List[Any]:
__lowercase = WavaVecaConfig.from_pretrained(SCREAMING_SNAKE_CASE )
__lowercase = SpeechaTextaConfig.from_pretrained(
SCREAMING_SNAKE_CASE , vocab_size=SCREAMING_SNAKE_CASE , decoder_layers=SCREAMING_SNAKE_CASE , do_stable_layer_norm=SCREAMING_SNAKE_CASE )
__lowercase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE , return_attention_mask=SCREAMING_SNAKE_CASE , )
__lowercase , __lowercase , __lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
__lowercase = model[0].eval()
# set weights for wav2vec2 encoder
__lowercase = WavaVecaModel(SCREAMING_SNAKE_CASE )
__lowercase = recursively_load_weights_wavaveca(model.encoder , SCREAMING_SNAKE_CASE )
__lowercase = SpeechaTextaForCausalLM(SCREAMING_SNAKE_CASE )
__lowercase , __lowercase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=SCREAMING_SNAKE_CASE )
# set output linear layer
unexpected_keys.remove('embed_out' )
__lowercase = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
__lowercase = SpeechEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE , decoder=SCREAMING_SNAKE_CASE )
__lowercase = False
# add projection layer
__lowercase = nn.Parameter(projection_layer.weight )
__lowercase = nn.Parameter(projection_layer.bias )
__lowercase = create_vocab_dict(SCREAMING_SNAKE_CASE )
with open(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp:
json.dump(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = SpeechaTextaTokenizer(os.path.join(SCREAMING_SNAKE_CASE , 'vocab.json' ) )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE )
__lowercase = hf_wavavec.config.to_dict()
__lowercase = tokenizer.pad_token_id
__lowercase = tokenizer.bos_token_id
__lowercase = tokenizer.eos_token_id
__lowercase = 'speech_to_text_2'
__lowercase = 'wav2vec2'
__lowercase = SpeechEncoderDecoderConfig.from_dict(SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 325 | 0 |
"""simple docstring"""
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def fetch_bbc_news( bbc_news_api_key ):
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["""articles"""] , 1 ):
        print(f"{i}.) {article['title']}" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 100 |
def binomial_coefficient( n , r ):
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
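# sanity checks (illustrative): values follow Pascal's rule C(n, r) = C(n-1, r-1) + C(n-1, r)
assert binomial_coefficient(n=10, r=5) == 252
assert binomial_coefficient(n=5, r=0) == 1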
| 325 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class lowercase :
def __init__( self ,A__ ,A__=1_3 ,A__=1_0 ,A__=3 ,A__=2 ,A__=2 ,A__=2 ,A__=True ,A__=True ,A__=3_2 ,A__=5 ,A__=4 ,A__=3_7 ,A__="gelu" ,A__=0.1 ,A__=0.1 ,A__=1_0 ,A__=0.02 ,A__=0.9 ,A__=None ,):
lowercase = parent
lowercase = batch_size
lowercase = image_size
lowercase = num_channels
lowercase = patch_size
lowercase = tubelet_size
lowercase = num_frames
lowercase = is_training
lowercase = use_labels
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = type_sequence_label_size
lowercase = initializer_range
lowercase = mask_ratio
lowercase = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowercase = (image_size // patch_size) ** 2
lowercase = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowercase = int(mask_ratio * self.seq_length)
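        # e.g. with the defaults above: (10 // 2) ** 2 = 25 patches per frame,
        # (2 // 2) * 25 = 25 tokens in total, and int(0.9 * 25) = 22 of them are masked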
def A__ ( self):
lowercase = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
lowercase = None
if self.use_labels:
lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size)
lowercase = self.get_config()
return config, pixel_values, labels
def A__ ( self):
return VideoMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,tubelet_size=self.tubelet_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A__ ,initializer_range=self.initializer_range ,)
def A__ ( self ,A__ ,A__ ,A__):
lowercase = VideoMAEModel(config=A__)
model.to(A__)
model.eval()
lowercase = model(A__)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size))
def A__ ( self ,A__ ,A__ ,A__):
lowercase = VideoMAEForPreTraining(A__)
model.to(A__)
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase = torch.ones((self.num_masks,))
lowercase = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
lowercase = mask.expand(self.batch_size ,-1).bool()
lowercase = model(A__ ,A__)
# model only returns predictions for masked patches
lowercase = mask.sum().item()
lowercase = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_masked_patches, decoder_num_labels))
def A__ ( self):
lowercase = self.prepare_config_and_inputs()
lowercase , lowercase , lowercase = config_and_inputs
lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
lowercase_ : str =(
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
lowercase_ : Tuple =(
{'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
lowercase_ : List[str] =False
lowercase_ : str =False
lowercase_ : int =False
lowercase_ : Dict =False
def A__ ( self):
lowercase = VideoMAEModelTester(self)
lowercase = ConfigTester(self ,config_class=A__ ,has_text_modality=A__ ,hidden_size=3_7)
def A__ ( self ,A__ ,A__ ,A__=False):
lowercase = copy.deepcopy(A__)
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowercase = torch.ones((self.model_tester.num_masks,))
lowercase = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
lowercase = mask.expand(self.model_tester.batch_size ,-1).bool()
lowercase = bool_masked_pos.to(A__)
if return_labels:
if model_class in [
*get_values(A__),
]:
lowercase = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A__)
return inputs_dict
def A__ ( self):
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''')
def A__ ( self):
pass
def A__ ( self):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(A__)
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module))
lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A__ ,nn.Linear))
def A__ ( self):
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = model_class(A__)
lowercase = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase = [*signature.parameters.keys()]
lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A__)
def A__ ( self):
lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A__)
@slow
def A__ ( self):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase = VideoMAEModel.from_pretrained(A__)
self.assertIsNotNone(A__)
def A__ ( self):
if not self.has_attentions:
pass
else:
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
lowercase = True
for model_class in self.all_model_classes:
lowercase = self.model_tester.seq_length - self.model_tester.num_masks
lowercase = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowercase = True
lowercase = False
lowercase = True
lowercase = model_class(A__)
model.to(A__)
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(A__ ,A__))
lowercase = outputs.attentions
self.assertEqual(len(A__) ,self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowercase = True
lowercase = model_class(A__)
model.to(A__)
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(A__ ,A__))
lowercase = outputs.attentions
self.assertEqual(len(A__) ,self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
lowercase = len(A__)
# Check attention is always last and order is fine
lowercase = True
lowercase = True
lowercase = model_class(A__)
model.to(A__)
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(A__ ,A__))
self.assertEqual(out_len + 1 ,len(A__))
lowercase = outputs.attentions
self.assertEqual(len(A__) ,self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) ,[self.model_tester.num_attention_heads, seq_len, seq_len] ,)
def A__ ( self):
def check_hidden_states_output(A__ ,A__ ,A__):
lowercase = model_class(A__)
model.to(A__)
model.eval()
with torch.no_grad():
lowercase = model(**self._prepare_for_class(A__ ,A__))
lowercase = outputs.hidden_states
lowercase = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(A__) ,A__)
lowercase = self.model_tester.seq_length - self.model_tester.num_masks
lowercase = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]) ,[seq_length, self.model_tester.hidden_size] ,)
lowercase , lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase = True
check_hidden_states_output(A__ ,A__ ,A__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase = True
check_hidden_states_output(A__ ,A__ ,A__)
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''')
def A__ ( self):
pass
def prepare_video():
    '''simple docstring'''
    file = hf_hub_download(
        repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
    video = np.load(file )
    return list(video )
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def A__ ( self):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def A__ ( self):
lowercase = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''').to(
A__)
lowercase = self.default_image_processor
lowercase = prepare_video()
lowercase = image_processor(A__ ,return_tensors='''pt''').to(A__)
# forward pass
with torch.no_grad():
lowercase = model(**A__)
# verify the logits
lowercase = torch.Size((1, 4_0_0))
self.assertEqual(outputs.logits.shape ,A__)
lowercase = torch.tensor([0.3669, -0.0688, -0.2421]).to(A__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,A__ ,atol=1E-4))
@slow
def A__ ( self):
lowercase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''').to(A__)
lowercase = self.default_image_processor
lowercase = prepare_video()
lowercase = image_processor(A__ ,return_tensors='''pt''').to(A__)
# add boolean mask, indicating which patches to mask
lowercase = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' ,filename='''bool_masked_pos.pt''')
lowercase = torch.load(A__)
# forward pass
with torch.no_grad():
lowercase = model(**A__)
# verify the logits
lowercase = torch.Size([1, 1_4_0_8, 1_5_3_6])
lowercase = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] ,device=A__)
self.assertEqual(outputs.logits.shape ,A__)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,A__ ,atol=1E-4))
# verify the loss (`config.norm_pix_loss` = `True`)
lowercase = torch.tensor([0.5142] ,device=A__)
self.assertTrue(torch.allclose(outputs.loss ,A__ ,atol=1E-4))
# verify the loss (`config.norm_pix_loss` = `False`)
lowercase = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ,norm_pix_loss=A__).to(
A__)
with torch.no_grad():
lowercase = model(**A__)
lowercase = torch.tensor(torch.tensor([0.6469]) ,device=A__)
self.assertTrue(torch.allclose(outputs.loss ,A__ ,atol=1E-4))
| 101 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = ["vqvae"]
def __init__( self : int , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Mel , _UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , mel=_UpperCAmelCase , vqvae=_UpperCAmelCase )
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , _UpperCAmelCase ) else 10_00
@torch.no_grad()
def __call__( self : str , _UpperCAmelCase : int = 1 , _UpperCAmelCase : str = None , _UpperCAmelCase : np.ndarray = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = None , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : str=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
__lowercase = steps or self.get_default_steps()
self.scheduler.set_timesteps(_UpperCAmelCase )
__lowercase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__lowercase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__lowercase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_UpperCAmelCase , device=self.device , )
__lowercase = noise
__lowercase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = self.mel.audio_slice_to_image(_UpperCAmelCase )
__lowercase = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
__lowercase = (input_image / 2_55) * 2 - 1
__lowercase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__lowercase = self.vqvae.encode(torch.unsqueeze(_UpperCAmelCase , 0 ) ).latent_dist.sample(
generator=_UpperCAmelCase )[0]
__lowercase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , self.scheduler.timesteps[start_step - 1] )
__lowercase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__lowercase = int(mask_start_secs * pixels_per_second )
__lowercase = int(mask_end_secs * pixels_per_second )
__lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _UpperCAmelCase ):
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )['sample']
else:
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']
if isinstance(self.scheduler , _UpperCAmelCase ):
__lowercase = self.scheduler.step(
model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']
else:
__lowercase = self.scheduler.step(
model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']
if mask is not None:
if mask_start > 0:
__lowercase = mask[:, step, :, :mask_start]
if mask_end > 0:
__lowercase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__lowercase = 1 / self.vqvae.config.scaling_factor * images
__lowercase = self.vqvae.decode(_UpperCAmelCase )['sample']
__lowercase = (images / 2 + 0.5).clamp(0 , 1 )
__lowercase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__lowercase = (images * 2_55).round().astype('uint8' )
__lowercase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_UpperCAmelCase , mode='RGB' ).convert('L' ) for _ in images) )
__lowercase = [self.mel.image_to_audio(_UpperCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_UpperCAmelCase ) )
@torch.no_grad()
def a__ ( self : Any , _UpperCAmelCase : List[Image.Image] , _UpperCAmelCase : int = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , _UpperCAmelCase )
self.scheduler.set_timesteps(_UpperCAmelCase )
__lowercase = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
__lowercase = (sample / 2_55) * 2 - 1
__lowercase = torch.Tensor(_UpperCAmelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
__lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__lowercase = self.scheduler.alphas_cumprod[t]
__lowercase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__lowercase = 1 - alpha_prod_t
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']
__lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
    def slerp( xa : torch.Tensor , xb : torch.Tensor , alpha : float ) -> torch.Tensor:
        """simple docstring"""
        theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
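    # note: spherical interpolation stays on the sphere -- for unit-norm inputs the
    # result keeps unit norm for every alpha in [0, 1], unlike a plain linear mix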
| 325 | 0 |
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("""_T""")
class QueueByTwoStacks(Generic[_T] ):
    '''simple docstring'''
    def __init__(self , iterable=None ):
        '''simple docstring'''
        self._stack1: list[_T] = list(iterable or [] )
        self._stack2: list[_T] = []
    def __len__(self ):
        '''simple docstring'''
        return len(self._stack1 ) + len(self._stack2 )
    def __repr__(self ):
        '''simple docstring'''
        return f"""Queue({tuple(self._stack2[::-1] + self._stack1 )})"""
    def put(self , item ):
        '''simple docstring'''
        self._stack1.append(item )
    def get(self ):
        '''simple docstring'''
        # bind the hot methods once to reduce attribute look-ups inside the loop
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop() )
        if not self._stack2:
            raise IndexError('''Queue is empty''' )
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
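    # illustrative usage of the QueueByTwoStacks class defined above
    queue = QueueByTwoStacks([10, 20, 30])
    queue.put(40)
    assert queue.get() == 10  # FIFO: the oldest item leaves first
    assert len(queue) == 3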
| 102 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search( left : int , right : int , array : list[int] , target : int ) -> int:
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search( array : list[int] , target : int ) -> int:
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search( left : int , right : int , array : list[int] , target : int ) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_rec != -1:
        print(F'''Iterative search: {target} found at positions: {result_ite}''')
        print(F'''Recursive search: {target} found at positions: {result_rec}''')
    else:
        print("""Not found""")
| 325 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_lilt'''] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 103 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A__ ( nn.Module ):
def __init__( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__lowercase = module
__lowercase = nn.Sequential(
nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , )
__lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a__ ( self : str , _UpperCAmelCase : List[str] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCAmelCase__ : int = "bigscience/bloom-1b7"
# Constant values
lowerCAmelCase__ : Any = 2.109659552692574
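    # expected ratio of the fp16 footprint to the 4-bit footprint; roughly 2.1x rather
    # than a naive 4x, since some modules (e.g. the lm_head) stay in higher precision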
lowerCAmelCase__ : str = "Hello my name is"
lowerCAmelCase__ : Any = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCAmelCase__ : List[Any] = 10
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Models and tokenizer
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
__lowercase = config.to_dict()
__lowercase = config.to_diff_dict()
__lowercase = config.to_json_string()
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
__lowercase = self.model_fpaa.get_memory_footprint()
__lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8 )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
__lowercase = True
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_fpaa.to(torch.floataa )
__lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowercase = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__lowercase = self.model_fpaa.half()
# Check this does not throw an error
__lowercase = self.model_fpaa.float()
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
@classmethod
def a__ ( cls : int ) -> Tuple:
"""simple docstring"""
__lowercase = 't5-small'
__lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase = AutoTokenizer.from_pretrained(cls.model_name )
__lowercase = 'Translate in German: Hello, my dog is cute'
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : int ) -> int:
"""simple docstring"""
from transformers import TaForConditionalGeneration
__lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase = None
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
__lowercase = modules
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().setUp()
# model_name
__lowercase = 'bigscience/bloom-560m'
__lowercase = 't5-small'
# Different types of model
__lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Sequence classification model
__lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# CausalLM model
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Seq2seq model
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'facebook/opt-350m'
super().setUp()
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
__lowercase = LoRALayer(module.q_proj , rank=16 )
__lowercase = LoRALayer(module.k_proj , rank=16 )
__lowercase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = "gpt2-xl"
lowerCAmelCase__ : str = 3.3191854854152187
| 325 | 0 |
'''simple docstring'''
import operator as op
def solve(post_fix):
    """Evaluate a postfix expression given as a list of tokens, printing each step in tabular form."""
    stack = []
    div = lambda x, y: int(x / y)  # noqa: E731 integer division operation
    opr = {
        '''^''': op.pow,
        '''*''': op.mul,
        '''/''': div,
        '''+''': op.add,
        '''-''': op.sub,
    }  # operators & their respective operation

    # print table header
    print('''Symbol'''.center(8), '''Action'''.center(12), '''Stack''', sep=''' | ''')
    print('''-''' * (30 + len(post_fix)))

    for x in post_fix:
        if x.isdigit():  # if x is a digit
            stack.append(x)  # append x to stack
            # output in tabular format
            print(x.rjust(8), ('''push(''' + x + ''')''').ljust(12), ''','''.join(stack), sep=''' | ''')
        else:
            b = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8), ('''pop(''' + b + ''')''').ljust(12), ''','''.join(stack), sep=''' | ''')
            a = stack.pop()  # pop stack
            # output in tabular format
            print(''''''.rjust(8), ('''pop(''' + a + ''')''').ljust(12), ''','''.join(stack), sep=''' | ''')
            stack.append(
                str(opr[x](int(a), int(b))))  # evaluate the 2 values popped from stack & push result to stack
            # output in tabular format
            print(
                x.rjust(8), ('''push(''' + a + x + b + ''')''').ljust(12), ''','''.join(stack), sep=''' | ''', )
    return int(stack[0])
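# Worked example (illustrative): for the input ["5", "6", "9", "*", "+"], the trace pushes 5, 6 and 9,
# pops 9 and 6 to push 6*9 = 54, then pops 54 and 5 to push 5+54 = 59, so solve(...) returns 59.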
if __name__ == "__main__":
    Postfix = input('''\n\nEnter a Postfix Equation (space separated) = ''').split(''' ''')
print('''\n\tResult = ''', solve(Postfix))
| 104 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NOTE: the values below are hard-coded and override the constructor arguments, as in the upstream tester.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
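    # NOTE (assumption, based on the ConvBERT design): the model splits its heads between self-attention
    # and a span-based convolution branch, so with the tester's head_ratio=2 only
    # num_attention_heads / 2 conventional attention heads appear in the attention outputs checked above.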
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 325 | 0 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 105 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    """Wraps a learning-rate scheduler so that it only steps when the wrapped optimizers actually stepped."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
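# Minimal usage sketch (illustrative; in practice `accelerate` builds this wrapper for you via
# `Accelerator.prepare`, and the names below are assumptions for the sketch):
#   scheduler = AcceleratedScheduler(torch_scheduler, optimizers=optimizer, step_with_optimizer=True)
#   scheduler.step()   # steps num_processes times per training step unless split_batches=True,
#                      # and becomes a no-op on steps where the optimizer was skipped (e.g. inf/nan grads)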
| 325 | 0 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
logger = logging.get_logger(__name__)

DPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''Intel/dpt-large''': '''https://huggingface.co/Intel/dpt-large/resolve/main/config.json''',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class DPTConfig(PretrainedConfig):
    """Configuration class for DPT (Dense Prediction Transformer) models."""

    model_type = "dpt"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=384,
        patch_size=16,
        num_channels=3,
        is_hybrid=False,
        qkv_bias=True,
        backbone_out_indices=[2, 5, 8, 11],
        readout_type="project",
        reassemble_factors=[4, 2, 1, 0.5],
        neck_hidden_sizes=[96, 192, 384, 768],
        fusion_hidden_size=256,
        head_in_index=-1,
        use_batch_norm_in_fusion_residual=False,
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        semantic_loss_ignore_index=255,
        semantic_classifier_dropout=0.1,
        backbone_featmap_shape=[1, 1024, 24, 24],
        neck_ignore_stages=[0, 1],
        backbone_config=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.is_hybrid = is_hybrid

        if self.is_hybrid:
            if backbone_config is None:
                logger.info('''Initializing the config with a `BiT` backbone.''')
                backbone_config = {
                    '''global_padding''': '''same''',
                    '''layer_type''': '''bottleneck''',
                    '''depths''': [3, 4, 9],
                    '''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
                    '''embedding_dynamic_padding''': True,
                }
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, dict):
                logger.info('''Initializing the config with a `BiT` backbone.''')
                backbone_config = BitConfig(**backbone_config)
            elif isinstance(backbone_config, PretrainedConfig):
                backbone_config = backbone_config
            else:
                raise ValueError(
                    F'backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.')
            self.backbone_config = backbone_config
            self.backbone_featmap_shape = backbone_featmap_shape
            self.neck_ignore_stages = neck_ignore_stages

            if readout_type != "project":
                raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''')
        else:
            self.backbone_config = None
            self.backbone_featmap_shape = None
            self.neck_ignore_stages = []

        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.backbone_out_indices = backbone_out_indices
        if readout_type not in ["ignore", "add", "project"]:
            raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''')
        self.readout_type = readout_type
        self.reassemble_factors = reassemble_factors
        self.neck_hidden_sizes = neck_hidden_sizes
        self.fusion_hidden_size = fusion_hidden_size
        self.head_in_index = head_in_index
        self.use_batch_norm_in_fusion_residual = use_batch_norm_in_fusion_residual
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
        self.semantic_classifier_dropout = semantic_classifier_dropout

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if output["backbone_config"] is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
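# Illustrative usage (a sketch, assuming the standard `transformers` config API):
#   config = DPTConfig(is_hybrid=True)   # builds a default BiT backbone config automatically
#   config_dict = config.to_dict()       # the nested `backbone_config` is serialized to a plain dict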
| 106 |
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE__ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")
def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
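# Illustrative example (not in the original file):
#   find_backend("    if not is_tokenizers_available():")  # -> "tokenizers"
#   find_backend("model = Model()")                        # -> None (no backend guard on the line)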
def parse_init(init_file):
    """Parse one __init__.py: collect `_import_structure` objects and `TYPE_CHECKING` objects per backend."""
    with open(init_file, 'r', encoding='utf-8', newline='\n') as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith('_import_structure = {'):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith('if TYPE_CHECKING') and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r'\[([^\]]+)\]', content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(', ')])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ') if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(' ' * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {'none': objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith('if TYPE_CHECKING'):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(', ')
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(' ' * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(' ' * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith('else')
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(', '))
        elif line.startswith(' ' * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {'none': objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(' ' * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(', '))
                elif line.startswith(' ' * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
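# Illustrative (not in the original file): for a typical custom init, parse_init returns a pair of
# dicts shaped like
#   ({"none": ["Constant", ...], "torch": ["SomeModel", ...]},   # from _import_structure
#    {"none": [...], "torch": [...]})                            # from the TYPE_CHECKING branch
# which analyze_results below compares key by key.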
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = 'base imports' if key == 'none' else F"""{key} backend"""
            errors.append(F"""Differences for {name}:""")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(F"""  {a} in TYPE_HINT but not in _import_structure.""")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(F"""  {a} in _import_structure but not in TYPE_HINT.""")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, '__init__.py')
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
                    failures.append('\n'.join(errors))
    if len(failures) > 0:
        raise ValueError('\n\n'.join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith('_'):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob('*.py'))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, '.')
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace('.py', '').replace(os.path.sep, '.')
            if len(submodule.split('.')) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        'transformers', os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'), submodule_search_locations=[PATH_TO_TRANSFORMERS], )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = '\n'.join(F"""- {module}""" for module in module_not_registered)
        raise ValueError(
            'The following submodules are not properly registered in the main init of Transformers:\n'
            F"""{list_of_modules}\n"""
            'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.')
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 325 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
__lowerCAmelCase : Optional[int] = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass


@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"

    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 107 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    @staticmethod
    def _should_log(main_process_only):
        """Check whether this process should emit the log record."""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """Delegate to the underlying logger; by default only the main process logs."""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.')
        main_process_only = kwargs.pop('main_process_only', True)
        in_order = kwargs.pop('in_order', False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL', None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
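# Minimal usage sketch (illustrative):
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("main process only")                                           # default main_process_only=True
#   logger.info("every rank, ordered", main_process_only=False, in_order=True)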
| 325 | 0 |
"""simple docstring"""
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False, metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False, metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
        + f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli", model_args.language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        else:
            train_dataset = load_dataset(
                "xnli", model_args.train_language, split="train", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli", model_args.language, split="validation", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli", model_args.language, split="test", cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label={str(i): label for i, label in enumerate(label_list)}, label2id={label: i for i, label in enumerate(label_list)}, finetuning_task="xnli", cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, do_lower_case=model_args.do_lower_case, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, )
# Preprocessing the datasets
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"], examples["hypothesis"], padding=padding, max_length=data_args.max_seq_length, truncation=True, )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing" ):
            train_dataset = train_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing" ):
            eval_dataset = eval_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing" ):
            predict_dataset = predict_dataset.map(
                preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on prediction dataset", )
# Get the metric function
    metric = evaluate.load("xnli" )

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
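    # Illustrative: the XNLI metric returns a dict such as {"accuracy": 0.83} (hypothetical value).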
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***" )
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n" )
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"""{index}\t{item}\n""" )
if __name__ == "__main__":
main()
| 108 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
__lowercase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__lowercase = True if 'large' in model_name or 'huge' in model_name else False
__lowercase = True if 'large' in model_name or 'huge' in model_name else False
__lowercase = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowercase = [3, 3, 3, 3]
__lowercase = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowercase = [4, 4, 4, 4]
__lowercase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowercase = [3, 3, 3, 3]
if "lrf" in model_name:
__lowercase = [3, 3, 3, 3]
else:
__lowercase = [2, 2, 2, 2]
if "tiny" in model_name:
__lowercase = 96
elif "small" in model_name:
__lowercase = 96
elif "base" in model_name:
__lowercase = 128
elif "large" in model_name:
__lowercase = 192
elif "xlarge" in model_name:
__lowercase = 256
elif "huge" in model_name:
__lowercase = 352
# set label information
__lowercase = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__lowercase = 'imagenet-22k-id2label.json'
else:
__lowercase = 'imagenet-1k-id2label.json'
__lowercase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
__lowercase = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = FocalNetConfig(
embed_dim=SCREAMING_SNAKE_CASE , depths=SCREAMING_SNAKE_CASE , focal_levels=SCREAMING_SNAKE_CASE , focal_windows=SCREAMING_SNAKE_CASE , use_conv_embed=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE , use_post_layernorm=SCREAMING_SNAKE_CASE , use_layerscale=SCREAMING_SNAKE_CASE , )
return config
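# Reading the branches above for two concrete names (sketch; values taken
# straight from the code): get_focalnet_config("focalnet-tiny") yields depths
# [2, 2, 6, 2], embed dim 96 and the ImageNet-1k label set, while
# get_focalnet_config("focalnet-large-lrf-fl4") yields depths [2, 2, 18, 2],
# embed dim 192, focal levels [4, 4, 4, 4] and the ImageNet-22k label set.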
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> Dict:
if "patch_embed.proj" in name:
__lowercase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowercase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowercase = 'encoder.' + name
if "encoder.layers" in name:
__lowercase = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__lowercase = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__lowercase = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowercase = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowercase = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowercase = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__lowercase = 'layernorm.weight'
if name == "norm.bias":
__lowercase = 'layernorm.bias'
if "head" in name:
__lowercase = name.replace('head' , 'classifier' )
else:
__lowercase = 'focalnet.' + name
return name
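# Example renames implied by the rules above (original key -> converted key):
#   "patch_embed.proj.weight" -> "focalnet.embeddings.patch_embeddings.projection.weight"
#   "layers.0.blocks.0.modulation.f.bias"
#       -> "focalnet.encoder.stages.0.layers.0.modulation.projection_in.bias"
#   "head.weight" -> "classifier.weight"  (classifier keys skip the "focalnet." prefix)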
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> List[str]:
# fmt: off
__lowercase = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__lowercase = model_name_to_url[model_name]
print('Checkpoint URL: ' , SCREAMING_SNAKE_CASE )
__lowercase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
__lowercase = state_dict.pop(SCREAMING_SNAKE_CASE )
__lowercase = val
__lowercase = get_focalnet_config(SCREAMING_SNAKE_CASE )
__lowercase = FocalNetForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
# load state dict
model.load_state_dict(SCREAMING_SNAKE_CASE )
# verify conversion
__lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowercase = BitImageProcessor(
do_resize=SCREAMING_SNAKE_CASE , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE , crop_size=224 , do_normalize=SCREAMING_SNAKE_CASE , image_mean=SCREAMING_SNAKE_CASE , image_std=SCREAMING_SNAKE_CASE , )
__lowercase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
__lowercase = processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
__lowercase = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__lowercase = image_transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , SCREAMING_SNAKE_CASE , atol=1E-4 )
__lowercase = model(**SCREAMING_SNAKE_CASE )
__lowercase = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowercase = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
__lowercase = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
__lowercase = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
__lowercase = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
__lowercase = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
__lowercase = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
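# Typical invocation (sketch; the script file name and the paths are assumptions):
#   python convert_focalnet_to_hf_format.py --model_name focalnet-tiny \
#       --pytorch_dump_folder_path ./focalnet-tiny --push_to_hub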
"""simple docstring"""
def check_cycle(graph: dict) -> bool:
    """Returns True if the directed graph contains a cycle, else False."""
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph )
def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
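# Quick illustration with adjacency lists (each node maps to its successors):
#   check_cycle({0: [1], 1: [2], 2: [0]})    # True  -- back edge 2 -> 0
#   check_cycle({0: [1, 2], 1: [2], 2: []})  # False -- the graph is a DAG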
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Tuple = "mask2former"
lowerCAmelCase__ : List[Any] = ["swin"]
lowerCAmelCase__ : str = {"hidden_size": "hidden_dim"}
def __init__( self : Optional[int] , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : str = "relu" , _UpperCAmelCase : int = 6 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 8 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 2_55 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 2.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 1_25_44 , _UpperCAmelCase : float = 3.0 , _UpperCAmelCase : float = 0.75 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : List[int] = [4, 8, 16, 32] , _UpperCAmelCase : bool = None , **_UpperCAmelCase : List[str] , ) -> int:
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowercase = CONFIG_MAPPING['swin'](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = backbone_config.pop('model_type' )
__lowercase = CONFIG_MAPPING[backbone_model_type]
__lowercase = config_class.from_dict(_UpperCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
__lowercase = backbone_config
__lowercase = feature_size
__lowercase = mask_feature_size
__lowercase = hidden_dim
__lowercase = encoder_feedforward_dim
__lowercase = activation_function
__lowercase = encoder_layers
__lowercase = decoder_layers
__lowercase = num_attention_heads
__lowercase = dropout
__lowercase = dim_feedforward
__lowercase = pre_norm
__lowercase = enforce_input_projection
__lowercase = common_stride
__lowercase = ignore_value
__lowercase = num_queries
__lowercase = no_object_weight
__lowercase = class_weight
__lowercase = mask_weight
__lowercase = dice_weight
__lowercase = train_num_points
__lowercase = oversample_ratio
__lowercase = importance_sample_ratio
__lowercase = init_std
__lowercase = init_xavier_std
__lowercase = use_auxiliary_loss
__lowercase = feature_strides
__lowercase = output_auxiliary_logits
__lowercase = decoder_layers
super().__init__(**_UpperCAmelCase )
@classmethod
def a__ ( cls : Union[str, Any] , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
return cls(
backbone_config=_UpperCAmelCase , **_UpperCAmelCase , )
def a__ ( self : str ) -> Dict[str, any]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.backbone_config.to_dict()
__lowercase = self.__class__.model_type
return output
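# Minimal usage sketch (this file masks the class as `A__` and its methods as
# `a__`; upstream they are Mask2FormerConfig, from_backbone_config and to_dict):
# constructing the config with no arguments falls back to the default Swin
# backbone logged above, and to_dict() re-serializes that nested backbone config.
#   config = Mask2FormerConfig()
#   blob = config.to_dict()  # includes the serialized backbone_config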
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
class __lowerCAmelCase ( lowerCAmelCase__ ):
def __init__(self , __magic_name__ ) -> Any:
'''simple docstring'''
super().__init__()
snake_case_ : int = nn.ModuleList(_UpperCAmelCase )
def lowerCamelCase (self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = None , __magic_name__ = False , __magic_name__ = True , ) -> Union[ControlNetOutput, Tuple]:
'''simple docstring'''
for i, (image, scale, controlnet) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase , self.nets ) ):
snake_case_ , snake_case_ : int = controlnet(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
# merge samples
if i == 0:
snake_case_ , snake_case_ : Union[str, Any] = down_samples, mid_sample
else:
snake_case_ : Optional[Any] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(_UpperCAmelCase , _UpperCAmelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def lowerCamelCase (self , __magic_name__ , __magic_name__ = True , __magic_name__ = None , __magic_name__ = False , __magic_name__ = None , ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = 0
snake_case_ : Any = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
_UpperCAmelCase , is_main_process=_UpperCAmelCase , save_function=_UpperCAmelCase , safe_serialization=_UpperCAmelCase , variant=_UpperCAmelCase , )
idx += 1
snake_case_ : str = model_path_to_save + F'''_{idx}'''
@classmethod
def lowerCamelCase (cls , __magic_name__ , **__magic_name__ ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = 0
snake_case_ : int = []
# load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
snake_case_ : Tuple = pretrained_model_path
while os.path.isdir(_UpperCAmelCase ):
snake_case_ : List[str] = ControlNetModel.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
controlnets.append(_UpperCAmelCase )
idx += 1
snake_case_ : str = pretrained_model_path + F'''_{idx}'''
logger.info(F'''{len(_UpperCAmelCase )} controlnets loaded from {pretrained_model_path}.''' )
if len(_UpperCAmelCase ) == 0:
raise ValueError(
F'''No ControlNets found under {os.path.dirname(_UpperCAmelCase )}. Expected at least {pretrained_model_path + "_0"}.''' )
return cls(_UpperCAmelCase )
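# Round-trip sketch of the directory convention documented in from_pretrained
# above (upstream this wrapper is diffusers' MultiControlNetModel; `cn_a` and
# `cn_b` are hypothetical ControlNetModel instances):
#   multi = MultiControlNetModel([cn_a, cn_b])
#   multi.save_pretrained("./my_controlnets")   # writes ./my_controlnets and ./my_controlnets_1
#   restored = MultiControlNetModel.from_pretrained("./my_controlnets")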
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
__lowercase = TOKENIZER_CLASSES
else:
__lowercase = {tokenizer_name: getattr(SCREAMING_SNAKE_CASE , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
__lowercase = TOKENIZER_CLASSES[tokenizer_name]
__lowercase = True
if checkpoint_name is None:
__lowercase = list(tokenizer_class.max_model_input_sizes.keys() )
else:
__lowercase = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
__lowercase = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
__lowercase , __lowercase = checkpoint.split('/' )
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif add_prefix:
__lowercase = checkpoint
__lowercase = dump_path
else:
__lowercase = None
__lowercase = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
__lowercase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
__lowercase = file_path.split(SCREAMING_SNAKE_CASE )[-1][0]
if next_char == "/":
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
__lowercase = tokenizer.save_pretrained(
SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE , filename_prefix=SCREAMING_SNAKE_CASE )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(SCREAMING_SNAKE_CASE )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
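# Typical invocations (sketch; the script file name is an assumption): convert
# every known checkpoint, or a single named one:
#   python convert_slow_tokenizers_checkpoints_to_fast.py --dump_path ./fast
#   python convert_slow_tokenizers_checkpoints_to_fast.py --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased --dump_path ./fast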
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
from math import floor
import numpy as np
from transformers import CvtConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFCvtForImageClassification, TFCvtModel
from transformers.models.cvt.modeling_tf_cvt import TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFCvtConfigTester ( lowerCAmelCase__ ):
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_UpperCAmelCase , """embed_dim""" ) )
self.parent.assertTrue(hasattr(_UpperCAmelCase , """num_heads""" ) )
class TFCvtModelTester :
def __init__( self : Optional[int] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int]=13 , __lowerCAmelCase : Dict=64 , __lowerCAmelCase : Optional[Any]=3 , __lowerCAmelCase : int=[16, 48, 96] , __lowerCAmelCase : Dict=[1, 3, 6] , __lowerCAmelCase : List[Any]=[1, 2, 10] , __lowerCAmelCase : str=[7, 3, 3] , __lowerCAmelCase : List[str]=[4, 2, 2] , __lowerCAmelCase : List[Any]=[2, 1, 1] , __lowerCAmelCase : Union[str, Any]=[2, 2, 2] , __lowerCAmelCase : Tuple=[False, False, True] , __lowerCAmelCase : str=[0.0, 0.0, 0.0] , __lowerCAmelCase : Optional[int]=0.02 , __lowerCAmelCase : List[Any]=1e-1_2 , __lowerCAmelCase : Optional[int]=True , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Union[str, Any]=2 , ):
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = patch_stride
_UpperCAmelCase = patch_padding
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = num_heads
_UpperCAmelCase = stride_kv
_UpperCAmelCase = depth
_UpperCAmelCase = cls_token
_UpperCAmelCase = attention_drop_rate
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
def lowerCAmelCase_ ( self : Optional[Any] ):
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
# create a random int32 tensor of given shape
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase_ ( self : List[str] ):
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self : List[str] , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = TFCvtModel(config=_UpperCAmelCase )
_UpperCAmelCase = model(_UpperCAmelCase , training=_UpperCAmelCase )
_UpperCAmelCase = (self.image_size, self.image_size)
_UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCAmelCase_ ( self : List[Any] , __lowerCAmelCase : int , __lowerCAmelCase : Any , __lowerCAmelCase : Optional[Any] ):
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFCvtForImageClassification(_UpperCAmelCase )
_UpperCAmelCase = model(_UpperCAmelCase , labels=_UpperCAmelCase , training=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class TFCvtModelTest ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
_snake_case : List[str] = (TFCvtModel, TFCvtForImageClassification) if is_tf_available() else ()
_snake_case : List[Any] = (
{"feature-extraction": TFCvtModel, "image-classification": TFCvtForImageClassification}
if is_tf_available()
else {}
)
_snake_case : List[Any] = False
_snake_case : Optional[Any] = False
_snake_case : Optional[int] = False
_snake_case : Optional[int] = False
_snake_case : Optional[int] = False
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase = TFCvtModelTester(self )
_UpperCAmelCase = TFCvtConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Tuple ):
self.config_tester.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
@unittest.skip(reason="""Cvt does not output attentions""" )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="""Cvt does not use inputs_embeds""" )
def lowerCAmelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason="""Cvt does not support input and output embeddings""" )
def lowerCAmelCase_ ( self : List[str] ):
pass
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
def lowerCAmelCase_ ( self : List[Any] ):
super().test_dataset_conversion()
@unittest.skipIf(
not is_tf_available() or len(tf.config.list_physical_devices("""GPU""" ) ) == 0 , reason="""TF does not support backprop for grouped convolutions on CPU.""" , )
@slow
def lowerCAmelCase_ ( self : Dict ):
super().test_keras_fit()
@unittest.skip(reason="""Get `Failed to determine best cudnn convolution algo.` error after using TF 2.12+cuda 11.8""" )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = tf.keras.mixed_precision.Policy("""mixed_float16""" )
tf.keras.mixed_precision.set_global_policy(_UpperCAmelCase )
super().test_keras_fit()
tf.keras.mixed_precision.set_global_policy("""float32""" )
def lowerCAmelCase_ ( self : Any ):
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(_UpperCAmelCase )
_UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def lowerCAmelCase_ ( self : str ):
def check_hidden_states_output(__lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Union[str, Any] , __lowerCAmelCase : Tuple ):
_UpperCAmelCase = model_class(_UpperCAmelCase )
_UpperCAmelCase = model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def lowerCAmelCase_ ( self : Optional[int] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[str] ):
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def lowerCAmelCase_ ( self : int ):
for model_name in TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFCvtModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFCvtModelIntegrationTest ( unittest.TestCase ):
@cached_property
def lowerCAmelCase_ ( self : Dict ):
return AutoImageProcessor.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCAmelCase_ ( self : List[Any] ):
_UpperCAmelCase = TFCvtForImageClassification.from_pretrained(TF_CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=_UpperCAmelCase , return_tensors="""tf""" )
# forward pass
_UpperCAmelCase = model(**_UpperCAmelCase )
# verify the logits
_UpperCAmelCase = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
_UpperCAmelCase = tf.constant([0.9_285, 0.9_015, -0.3_150] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _UpperCAmelCase , atol=1e-4 ) )
from math import isqrt, log2
def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes strictly below max_number."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]
def solution(base: int = 800800, degree: int = 800800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
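# Why the two-pointer scan in solution() works: a prime pair (p, q) with p < q
# yields a hybrid integer when p**q * q**p <= base**degree. Taking log2 of both
# sides gives
#     q * log2(p) + p * log2(q) <= degree * log2(base) = upper_bound,
# and the left-hand side is increasing in q for fixed p, so the largest valid
# `right` index only ever moves leftwards as `left` advances.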
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class a_ ( lowerCAmelCase__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[torch.FloatTensor] = None
__SCREAMING_SNAKE_CASE : torch.FloatTensor = None
__SCREAMING_SNAKE_CASE : Optional[Tuple[torch.FloatTensor]] = None
__SCREAMING_SNAKE_CASE : Optional[Tuple[torch.FloatTensor]] = None
class a_ ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=2 , _lowerCamelCase=512 , _lowerCamelCase="cls" , _lowerCamelCase=False , _lowerCamelCase=True , **_lowerCamelCase , ) ->Dict:
super().__init__(pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
SCREAMING_SNAKE_CASE : Optional[int] = project_dim
SCREAMING_SNAKE_CASE : List[Any] = pooler_fn
SCREAMING_SNAKE_CASE : Optional[int] = learn_encoder
SCREAMING_SNAKE_CASE : int = use_attention_mask
class a_ ( lowerCAmelCase__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = [R"pooler", R"logit_scale"]
__SCREAMING_SNAKE_CASE : Any = [R"position_ids", R"predictions.decoder.bias"]
__SCREAMING_SNAKE_CASE : Dict = "roberta"
__SCREAMING_SNAKE_CASE : List[str] = RobertaSeriesConfig
def __init__( self , _lowerCamelCase ) ->Union[str, Any]:
super().__init__(_UpperCAmelCase )
SCREAMING_SNAKE_CASE : Union[str, Any] = XLMRobertaModel(_UpperCAmelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE : Union[str, Any] = getattr(_UpperCAmelCase , '''has_pre_transformation''' , _UpperCAmelCase )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE : List[Any] = nn.Linear(config.hidden_size , config.project_dim )
SCREAMING_SNAKE_CASE : Dict = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def __lowerCAmelCase ( self , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , _lowerCamelCase = None , ) ->Dict:
SCREAMING_SNAKE_CASE : Dict = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE : int = self.base_model(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , position_ids=_UpperCAmelCase , head_mask=_UpperCAmelCase , inputs_embeds=_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase , encoder_attention_mask=_UpperCAmelCase , output_attentions=_UpperCAmelCase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=_UpperCAmelCase , )
if self.has_pre_transformation:
SCREAMING_SNAKE_CASE : Optional[Any] = outputs['''hidden_states'''][-2]
SCREAMING_SNAKE_CASE : str = self.pre_LN(_UpperCAmelCase )
SCREAMING_SNAKE_CASE : Tuple = self.transformation_pre(_UpperCAmelCase )
return TransformationModelOutput(
projection_state=_UpperCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
SCREAMING_SNAKE_CASE : List[str] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=_UpperCAmelCase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
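# Sketch of the two output paths in the forward pass above: without the
# pre-transformation flag, the final hidden state is projected directly through
# `transformation`; with it, the second-to-last layer (hidden_states[-2]) is
# layer-normed by `pre_LN` first and then projected through `transformation_pre`.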
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
SCREAMING_SNAKE_CASE__ = 5
SCREAMING_SNAKE_CASE__ = 10
@require_sentencepiece
@require_tokenizers
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : Optional[Any] = SpeechaTextTokenizer
lowerCAmelCase__ : Any = False
lowerCAmelCase__ : List[Any] = True
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
__lowercase = sp.SentencePieceProcessor()
spm_model.Load(_UpperCAmelCase )
__lowercase = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_UpperCAmelCase ) )]
__lowercase = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__lowercase = Path(self.tmpdirname )
save_json(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['spm_file'] )
__lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = '<pad>'
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(_UpperCAmelCase ) , 10_01 )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
__lowercase = tokenizer.tokenize('This is a test' )
self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [2_89, 50, 14, 1_74, 3_86] , )
__lowercase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
__lowercase = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
__lowercase = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class A__ ( unittest.TestCase ):
lowerCAmelCase__ : str = "valhalla/s2t_mustc_multilinguial_medium"
lowerCAmelCase__ : Dict = "C'est trop cool"
lowerCAmelCase__ : List[Any] = "Esto es genial"
@classmethod
def a__ ( cls : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
def a__ ( self : str ) -> int:
"""simple docstring"""
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
__lowercase = [ES_CODE, 4, 16_01, 47, 76_47, 2]
__lowercase = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
__lowercase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'fr'
__lowercase = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , _UpperCAmelCase )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
__lowercase = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
import requests
__snake_case = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def _A ( SCREAMING_SNAKE_CASE__ : str ):
# fetching a list of articles in json format
UpperCamelCase :Tuple = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=5_02_65 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[int]=30_72 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[int]=1e-5 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Dict=10_24 , _UpperCAmelCase : int=1_28 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : List[Any]=1_28 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : List[Any]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[int]=2_24 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = max_ad_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
__lowercase = rel_ad_pos_bins
__lowercase = max_rel_ad_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : int = version.parse("1.12" )
@property
def a__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def a__ ( self : int ) -> float:
"""simple docstring"""
return 1e-5
@property
def a__ ( self : str ) -> int:
"""simple docstring"""
return 12
def a__ ( self : str , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
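# Hypothetical export sketch (no export entry point is shown in this file; the
# names below are assumptions): the dummy inputs generated above are what an
# ONNX export would trace the model with.
#   onnx_config = LayoutLMv3OnnxConfig(model.config, task="question-answering")
#   dummy = onnx_config.generate_dummy_inputs(processor, batch_size=2, seq_length=8)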
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
_lowercase : int = logging.get_logger(__name__)
class _UpperCAmelCase ( lowerCAmelCase__ ):
a__ : Any = ["pixel_values"]
def __init__( self : Tuple , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : bool = True , **_lowercase : int , ):
super().__init__(**_UpperCAmelCase )
__UpperCAmelCase = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
__UpperCAmelCase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__UpperCAmelCase = do_resize
__UpperCAmelCase = size
__UpperCAmelCase = resample
__UpperCAmelCase = do_rescale
__UpperCAmelCase = rescale_factor
__UpperCAmelCase = do_normalize
__UpperCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
__UpperCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD
__UpperCAmelCase = do_convert_rgb
def a ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
__UpperCAmelCase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
__UpperCAmelCase = (size['''height'''], size['''width'''])
return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a ( self : Tuple , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : List[str] , ):
return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase )
def a ( self : Union[str, Any] , _lowercase : ImageInput , _lowercase : Optional[bool] = None , _lowercase : Optional[Dict[str, int]] = None , _lowercase : PILImageResampling = None , _lowercase : Optional[bool] = None , _lowercase : Optional[float] = None , _lowercase : Optional[bool] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : bool = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Tuple , ):
__UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
__UpperCAmelCase = resample if resample is not None else self.resample
__UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
__UpperCAmelCase = image_std if image_std is not None else self.image_std
__UpperCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
__UpperCAmelCase = size if size is not None else self.size
__UpperCAmelCase = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase )
__UpperCAmelCase = make_list_of_images(_UpperCAmelCase )
if not valid_images(_UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
__UpperCAmelCase = [convert_to_rgb(_UpperCAmelCase ) for image in images]
# All transformations expect numpy arrays.
__UpperCAmelCase = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
__UpperCAmelCase = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images]
if do_rescale:
__UpperCAmelCase = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images]
if do_normalize:
__UpperCAmelCase = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images]
__UpperCAmelCase = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images]
__UpperCAmelCase = BatchFeature(data={'''pixel_values''': images} , tensor_type=_UpperCAmelCase )
return encoded_outputs
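# A tiny numpy check of the rescale + normalize arithmetic applied above
# (illustrative sketch only; OPENAI_CLIP_MEAN and OPENAI_CLIP_STD come from this
# file's imports, and numpy is already imported at the top as np):
pixel = np.array([128.0, 64.0, 255.0])  # one RGB pixel
rescaled = pixel * (1 / 255)  # matches rescale_factor = 1/255
normalized = (rescaled - np.array(OPENAI_CLIP_MEAN)) / np.array(OPENAI_CLIP_STD)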
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE__ = """RegNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = [1, 1088, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A__ ( nn.Module ):
def __init__( self : str , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[str] = "relu" , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__lowercase = nn.Convad(
_UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , groups=_UpperCAmelCase , bias=_UpperCAmelCase , )
__lowercase = nn.BatchNormad(_UpperCAmelCase )
__lowercase = ACTaFN[activation] if activation is not None else nn.Identity()
def a__ ( self : Tuple , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.convolution(_UpperCAmelCase )
__lowercase = self.normalization(_UpperCAmelCase )
__lowercase = self.activation(_UpperCAmelCase )
return hidden_state
class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
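# A hedged usage sketch (added; not in the original module). Because this file relies on
# relative imports it cannot run as a script, so the example stays a comment; the
# checkpoint name is taken from the docstring constants above, the rest is standard API.
#
#     from PIL import Image
#     import requests
#     from transformers import AutoImageProcessor, RegNetForImageClassification
#
#     url = "http://images.cocodataset.org/val2017/000000039769.jpg"
#     image = Image.open(requests.get(url, stream=True).raw)
#     image_processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#     inputs = image_processor(image, return_tensors="pt")
#     logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"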
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):
        """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids"""
        pass
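# Added worked example (not in the original file): with the toy vocab above,
# "UNwant\u00E9d,running" is basic-tokenized (lower-cased, accents stripped, punctuation
# split) and WordPiece-split into un ##want ##ed , runn ##ing, i.e. ids
# [7, 4, 5, 10, 8, 9] -- exactly what test_full_tokenizer asserts against.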
from __future__ import annotations


def min_path_sum(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
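# A short worked example (added; not in the original file). The DP above overwrites each
# cell with the minimum cost of reaching it moving only right or down, so the bottom-right
# cell ends up holding the answer. Note that the input grid is mutated in place.
if __name__ == "__main__":
    example_grid = [[1, 3, 1], [1, 5, 1], [4, 2, 1]]
    assert min_path_sum(example_grid) == 7  # path 1 -> 3 -> 1 -> 1 -> 1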
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
"""simple docstring"""
def __init__( self : Any, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : str=3, UpperCAmelCase__ : Optional[int]=3_2, UpperCAmelCase__ : List[Any]=3, UpperCAmelCase__ : Optional[int]=1_0, UpperCAmelCase__ : List[str]=[1_0, 2_0, 3_0, 4_0], UpperCAmelCase__ : List[str]=[1, 1, 2, 1], UpperCAmelCase__ : Dict=True, UpperCAmelCase__ : List[str]=True, UpperCAmelCase__ : Optional[Any]="relu", UpperCAmelCase__ : int=3, UpperCAmelCase__ : Tuple=None, ):
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = num_channels
__lowercase = embeddings_size
__lowercase = hidden_sizes
__lowercase = depths
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_act
__lowercase = num_labels
__lowercase = scope
__lowercase = len(_UpperCAmelCase )
def _lowercase ( self : Dict ):
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = self.get_config()
return config, pixel_values
def _lowercase ( self : Tuple ):
return RegNetConfig(
num_channels=self.num_channels, embeddings_size=self.embeddings_size, hidden_sizes=self.hidden_sizes, depths=self.depths, hidden_act=self.hidden_act, num_labels=self.num_labels, image_size=self.image_size, )
def _lowercase ( self : Dict, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : int ):
__lowercase = FlaxRegNetModel(config=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
# Output shape (b, c, h, w)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2), )
def _lowercase ( self : str, UpperCAmelCase__ : Tuple, UpperCAmelCase__ : List[str] ):
__lowercase = self.num_labels
__lowercase = FlaxRegNetForImageClassification(config=_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def _lowercase ( self : List[Any] ):
__lowercase = self.prepare_config_and_inputs()
__lowercase ,__lowercase = config_and_inputs
__lowercase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
"""simple docstring"""
__UpperCAmelCase : Any = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()
__UpperCAmelCase : Dict = False
__UpperCAmelCase : Optional[Any] = False
__UpperCAmelCase : Optional[int] = False
def _lowercase ( self : List[str] ):
__lowercase = FlaxRegNetModelTester(self )
__lowercase = ConfigTester(self, config_class=_UpperCAmelCase, has_text_modality=_UpperCAmelCase )
def _lowercase ( self : List[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self : int ):
return
def _lowercase ( self : Dict ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def _lowercase ( self : Dict ):
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@unittest.skip(reason="RegNet does not use inputs_embeds" )
def _lowercase ( self : List[Any] ):
pass
@unittest.skip(reason="RegNet does not support input and output embeddings" )
def _lowercase ( self : Optional[Any] ):
pass
def _lowercase ( self : Any ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_UpperCAmelCase )
__lowercase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ["pixel_values"]
self.assertListEqual(arg_names[:1], _UpperCAmelCase )
def _lowercase ( self : Optional[int] ):
def check_hidden_states_output(UpperCAmelCase__ : Optional[int], UpperCAmelCase__ : Dict, UpperCAmelCase__ : List[Any] ):
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(**self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase ) )
__lowercase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowercase = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ), expected_num_stages + 1 )
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = True
check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowercase = True
check_hidden_states_output(_UpperCAmelCase, _UpperCAmelCase, _UpperCAmelCase )
def _lowercase ( self : Tuple ):
__lowercase ,__lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowercase = self._prepare_for_class(_UpperCAmelCase, _UpperCAmelCase )
__lowercase = model_class(_UpperCAmelCase )
@jax.jit
def model_jitted(UpperCAmelCase__ : List[Any], **UpperCAmelCase__ : str ):
return model(pixel_values=_UpperCAmelCase, **_UpperCAmelCase )
with self.subTest("JIT Enabled" ):
__lowercase = model_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
__lowercase = model_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ), len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase, _UpperCAmelCase ):
self.assertEqual(jitted_output.shape, output.shape )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ):
return AutoImageProcessor.from_pretrained("facebook/regnet-y-040" ) if is_vision_available() else None
@slow
def _lowercase ( self : Union[str, Any] ):
__lowercase = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040" )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_UpperCAmelCase, return_tensors="np" )
__lowercase = model(**_UpperCAmelCase )
# verify the logits
__lowercase = (1, 1_0_0_0)
self.assertEqual(outputs.logits.shape, _UpperCAmelCase )
__lowercase = jnp.array([-0.4_180, -1.5_051, -3.4_836] )
self.assertTrue(jnp.allclose(outputs.logits[0, :3], _UpperCAmelCase, atol=1E-4 ) )
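# Added illustrative sketch (not part of the original test file): the JIT-vs-eager pattern
# the tests above rely on, reduced to a standalone snippet. `jax.disable_jit()` forces
# op-by-op execution, which is what the "JIT Disabled" subtests compare against.
#
#     import jax
#     import jax.numpy as jnp
#
#     @jax.jit
#     def scale(x):
#         return 2.0 * x
#
#     x = jnp.arange(4.0)
#     with jax.disable_jit():
#         eager = scale(x)
#     assert bool(jnp.allclose(scale(x), eager))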
import enum
import os
from hashlib import sha256
from typing import Optional

from .. import config
from .logging import get_logger


logger = get_logger(__name__)


class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    """Exceptions during checksums verifications of downloaded files."""


class UnexpectedDownloadedFile(ChecksumVerificationException):
    """Some downloaded files were not expected."""


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    """Some files were supposed to be downloaded but were not."""


class NonMatchingChecksumError(ChecksumVerificationException):
    """The downloaded file checksum don't match the expected checksum."""


def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None):
    if expected_checksums is None:
        logger.info("Unable to verify checksums.")
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = " for " + verification_name if verification_name is not None else ""
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"Checksums didn't match{for_verification_name}:\n"
            f"{bad_urls}\n"
            "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error"
        )
    logger.info("All the checksums matched successfully" + for_verification_name)


class SplitsVerificationException(Exception):
    """Exceptions during splits verifications."""


class UnexpectedSplits(SplitsVerificationException):
    """The expected splits of the downloaded file is missing."""


class ExpectedMoreSplits(SplitsVerificationException):
    """Some recorded splits are missing."""


class NonMatchingSplitsSizesError(SplitsVerificationException):
    """The splits sizes don't match the expected splits sizes."""


def verify_splits(expected_splits: Optional[dict], recorded_splits: dict):
    if expected_splits is None:
        logger.info("Unable to verify splits sizes.")
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {"expected": expected_splits[name], "recorded": recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info("All the splits matched successfully.")


def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    """Compute the file size and the sha256 checksum of a file."""
    if record_checksum:
        m = sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(1 << 20), b""):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: Optional[int]) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
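# A minimal usage sketch (added; not part of the original module -- the relative imports
# above mean it only runs inside the `datasets` package, so the example stays a comment).
# `verify_checksums` compares two {url: {"num_bytes": ..., "checksum": ...}} mappings:
#
#     expected = {"https://example.com/data.txt": {"num_bytes": 3, "checksum": "abc"}}
#     verify_checksums(expected, expected)  # passes silently
#     bad = {"https://example.com/data.txt": {"num_bytes": 3, "checksum": "xyz"}}
#     verify_checksums(expected, bad)       # raises NonMatchingChecksumError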
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
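# A hedged usage sketch (added; not part of the original file). The tool is invoked through
# the `PipelineTool` call machinery; the image path and question below are illustrative.
#
#     from PIL import Image
#
#     tool = ImageQuestionAnsweringTool()
#     image = Image.open("cats.png")  # hypothetical local image
#     print(tool(image, "How many cats are sleeping?"))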
import math


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
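# Small demonstration (added; not in the original file): `next_prime` scans upward from
# `factor * value` (or downward with desc=True) until it finds a prime, and restarts one
# past the starting point if the starting point itself is already prime.
if __name__ == "__main__":
    assert is_prime(97)
    assert not is_prime(1)
    assert next_prime(14) == 17
    assert next_prime(13) == 17  # 13 is prime, so the search restarts at 14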
def solution(n: int = 1_000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
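# Added illustrative check (not in the original file): for perimeter 12 the only
# Pythagorean triplet is (3, 4, 5), so the maximum product is 60.
if __name__ == "__main__":
    assert solution(12) == 60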
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)
def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [torch.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(_UpperCAmelCase ):
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
@require_vision
@require_tf
class TFSamProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)
def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [tf.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor)
        processor.save_pretrained(self.tmpdirname)
def a__ ( self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
__lowercase = [tf.convert_to_tensor(_UpperCAmelCase )]
__lowercase = [torch.tensor(_UpperCAmelCase )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
__lowercase = processor(images=_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
__lowercase = processor(images=_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
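# Added usage sketch (not part of the original tests): the `post_process_masks` call the
# tests exercise upscales low-resolution mask logits back to the original image size.
# Shapes mirror the tests above; the default "pt" return type needs torch installed.
if __name__ == "__main__":
    sketch_processor = SamProcessor(SamImageProcessor())
    sketch_masks = sketch_processor.post_process_masks(
        [np.ones((1, 3, 5, 5))], [[1764, 2646]], [[683, 1024]]
    )
    print(sketch_masks[0].shape)  # (1, 3, 1764, 2646)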
import unittest

from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train a new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
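# Illustrative note (added): thanks to the `_LazyModule` indirection above, the actual
# tokenizer module is only imported on first attribute access, e.g.
#
#     from transformers.models.bartpho import BartphoTokenizer  # triggers the lazy import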
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_SCREAMING_SNAKE_CASE = """platform"""
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }
class FlaxBlenderbotModelTester:
def __init__( self : List[Any] , _A : Tuple , _A : Tuple=13 , _A : Union[str, Any]=7 , _A : List[str]=True , _A : Any=False , _A : int=99 , _A : Dict=16 , _A : Tuple=2 , _A : Optional[Any]=4 , _A : Optional[int]=4 , _A : str="gelu" , _A : Dict=0.1 , _A : Tuple=0.1 , _A : Optional[Any]=32 , _A : Tuple=2 , _A : Tuple=1 , _A : Optional[int]=0 , _A : str=0.0_2 , ) -> Tuple:
"""simple docstring"""
snake_case_ : Optional[int] = parent
snake_case_ : Optional[Any] = batch_size
snake_case_ : List[str] = seq_length
snake_case_ : Dict = is_training
snake_case_ : Union[str, Any] = use_labels
snake_case_ : List[str] = vocab_size
snake_case_ : Any = hidden_size
snake_case_ : Dict = num_hidden_layers
snake_case_ : str = num_attention_heads
snake_case_ : str = intermediate_size
snake_case_ : Optional[Any] = hidden_act
snake_case_ : List[str] = hidden_dropout_prob
snake_case_ : Dict = attention_probs_dropout_prob
snake_case_ : Any = max_position_embeddings
snake_case_ : Any = eos_token_id
snake_case_ : Optional[Any] = pad_token_id
snake_case_ : Tuple = bos_token_id
snake_case_ : List[Any] = initializer_range
def UpperCAmelCase_ ( self : str ) -> Dict:
"""simple docstring"""
snake_case_ : Optional[int] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
snake_case_ : str = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
snake_case_ : Optional[int] = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
snake_case_ : Optional[int] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , )
snake_case_ : Optional[Any] = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
return config, inputs_dict
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ ,snake_case_ : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def UpperCAmelCase_ ( self : int , _A : Optional[int] , _A : List[str] , _A : List[str] ) -> str:
"""simple docstring"""
snake_case_ : Union[str, Any] = 20
snake_case_ : Optional[Any] = model_class_name(_UpperCAmelCase )
snake_case_ : List[Any] = model.encode(inputs_dict['input_ids'] )
snake_case_ ,snake_case_ : Union[str, Any] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
snake_case_ : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
snake_case_ : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
snake_case_ : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
snake_case_ : Optional[Any] = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
snake_case_ : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
snake_case_ : Dict = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , )
snake_case_ : Any = model.decode(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : Optional[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
def UpperCAmelCase_ ( self : Union[str, Any] , _A : str , _A : str , _A : List[str] ) -> int:
"""simple docstring"""
snake_case_ : Optional[int] = 20
snake_case_ : Optional[int] = model_class_name(_UpperCAmelCase )
snake_case_ : Union[str, Any] = model.encode(inputs_dict['input_ids'] )
snake_case_ ,snake_case_ : int = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
snake_case_ : Optional[Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
snake_case_ : Union[str, Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase )
snake_case_ : Tuple = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
snake_case_ : Optional[int] = model.decode(
decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
snake_case_ : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
snake_case_ : str = model.decode(
decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , )
snake_case_ : Union[str, Any] = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase )
snake_case_ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=F"""Max diff is {diff}""" )
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
snake_case_ : Optional[Any] = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
snake_case_ : Union[str, Any] = input_ids.shape[0]
snake_case_ : Optional[int] = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def UpperCAmelCase_ ( self : int ) -> Any:
"""simple docstring"""
snake_case_ ,snake_case_ ,snake_case_ : Optional[int] = self._get_config_and_data()
snake_case_ : Union[str, Any] = FlaxBlenderbotForConditionalGeneration(_UpperCAmelCase )
snake_case_ : str = lm_model(input_ids=_UpperCAmelCase )
snake_case_ : Any = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , _UpperCAmelCase )
def UpperCAmelCase_ ( self : Any ) -> Dict:
"""simple docstring"""
snake_case_ : int = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
snake_case_ : Optional[Any] = FlaxBlenderbotForConditionalGeneration(_UpperCAmelCase )
snake_case_ : int = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
snake_case_ : str = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
snake_case_ : Tuple = lm_model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase )
snake_case_ : Optional[Any] = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , _UpperCAmelCase )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
snake_case_ : str = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
snake_case_ : Union[str, Any] = shift_tokens_right(_UpperCAmelCase , 1 , 2 )
snake_case_ : Optional[Any] = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
snake_case_ : List[str] = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(_UpperCAmelCase , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def UpperCAmelCase_ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : int = FlaxBlenderbotModelTester(self )
def UpperCAmelCase_ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase_ ( self : List[Any] ) -> int:
"""simple docstring"""
snake_case_ ,snake_case_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def UpperCAmelCase_ ( self : Dict ) -> List[Any]:
"""simple docstring"""
snake_case_ ,snake_case_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case_ : List[Any] = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : Tuple = model_class(_UpperCAmelCase )
@jax.jit
def encode_jitted(_A : List[Any] , _A : List[Any]=None , **_A : str ):
return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase )
with self.subTest('JIT Enabled' ):
snake_case_ : Dict = encode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
snake_case_ : str = encode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
snake_case_ ,snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case_ : List[Any] = model_class(_UpperCAmelCase )
snake_case_ : Tuple = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
snake_case_ : Optional[Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(_A : int , _A : List[Any] , _A : int ):
return model.decode(
decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , )
with self.subTest('JIT Enabled' ):
snake_case_ : str = decode_jitted(**_UpperCAmelCase ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
snake_case_ : Dict = decode_jitted(**_UpperCAmelCase ).to_tuple()
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case_ : Union[str, Any] = model_class_name.from_pretrained('facebook/blenderbot-400M-distill' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
snake_case_ : Union[str, Any] = np.ones((1, 1) ) * model.config.eos_token_id
snake_case_ : List[str] = model(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skipUnless(jax_device != 'cpu' , '3B test too slow on CPU.' )
@slow
def UpperCAmelCase_ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
snake_case_ : List[Any] = {'num_beams': 1, 'early_stopping': True, 'min_length': 15, 'max_length': 25}
snake_case_ : int = {'skip_special_tokens': True, 'clean_up_tokenization_spaces': True}
snake_case_ : Optional[int] = FlaxBlenderbotForConditionalGeneration.from_pretrained('facebook/blenderbot-3B' , from_pt=_UpperCAmelCase )
snake_case_ : Optional[int] = BlenderbotTokenizer.from_pretrained('facebook/blenderbot-3B' )
snake_case_ : Dict = ['Sam']
snake_case_ : str = tokenizer(_UpperCAmelCase , return_tensors='jax' )
snake_case_ : str = model.generate(**_UpperCAmelCase , **_UpperCAmelCase )
snake_case_ : Optional[int] = 'Sam is a great name. It means "sun" in Gaelic.'
snake_case_ : Optional[Any] = tokenizer.batch_decode(_UpperCAmelCase , **_UpperCAmelCase )
assert generated_txt[0].strip() == tgt_text
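# Added illustrative example (not in the original file) of what `shift_tokens_right` does,
# assuming the standard flax implementation imported above: tokens move one slot to the
# right, the decoder start token (2) is prepended, and any -100 is replaced by the pad id.
#
#     import numpy as np
#     ids = np.array([[71, 82, 18, 2, 1, 1]], dtype=np.int64)
#     shift_tokens_right(ids, 1, 2)  # -> [[ 2, 71, 82, 18,  2,  1]]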
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = "transfo-xl"
lowerCAmelCase__ : int = ["mems"]
lowerCAmelCase__ : Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , vocab_size=26_77_35 , cutoffs=[2_00_00, 4_00_00, 20_00_00] , d_model=10_24 , d_embed=10_24 , n_head=16 , d_head=64 , d_inner=40_96 , div_val=4 , pre_lnorm=False , n_layer=18 , mem_len=16_00 , clamp_len=10_00 , same_length=True , proj_share_all_but_first=True , attn_type=0 , sample_softmax=-1 , adaptive=True , dropout=0.1 , dropatt=0.0 , untie_r=True , init="normal" , init_range=0.01 , proj_init_std=0.01 , init_std=0.02 , layer_norm_epsilon=1e-5 , eos_token_id=0 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs )
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs )
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs )
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id , **kwargs )
@property
    def max_position_embeddings( self ) -> int:
"""simple docstring"""
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self , value ) -> None:
"""simple docstring"""
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 325 | 0 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name: str ):
    """simple docstring"""
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f'''.{module_name}''' , '''transformers.models''' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , '''__name__''' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_image_processor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    """simple docstring"""
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            '''Could not locate the image processor configuration file, will try to use the model config instead.''' )
        return {}
    with open(resolved_config_file , encoding='''utf-8''' ) as reader:
        return json.load(reader )
class AutoImageProcessor:
def __init__(self ) -> Optional[int]:
'''simple docstring'''
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
        '''simple docstring'''
        config = kwargs.pop('''config''' , None )
        trust_remote_code = kwargs.pop('''trust_remote_code''' , None )
        kwargs['''_from_auto'''] = True
        config_dict , _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path , **kwargs )
        image_processor_class = config_dict.get('''image_processor_type''' , None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
            image_processor_auto_map = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('''feature_extractor_type''' , None )
            if feature_extractor_class is not None:
                logger.warning(
                    '''Could not find image processor class in the image processor config or the model config. Loading'''
                    ''' based on pattern matching with the model\'s feature extractor configuration.''' )
                image_processor_class = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
            if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
                feature_extractor_auto_map = config_dict['''auto_map''']['''AutoFeatureExtractor''']
                image_processor_auto_map = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
                logger.warning(
                    '''Could not find image processor auto map in the image processor config or the model config.'''
                    ''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config , PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path , **kwargs )
            # It could be in `config.image_processor_type`
            image_processor_class = getattr(config , '''image_processor_type''' , None )
            if hasattr(config , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['''AutoImageProcessor''']
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code , pretrained_model_name_or_path , has_local_code , has_remote_code )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map , pretrained_model_name_or_path , **kwargs )
            kwargs.pop('''code_revision''' , None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict , **kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict , **kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict , **kwargs )
        raise ValueError(
            F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
            F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
            F'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
    def register( config_class , image_processor_class ):
        '''simple docstring'''
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
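# Usage sketch for the auto class above (hedged; the checkpoint id is just an example):
#
#   processor = AutoImageProcessor.from_pretrained('google/vit-base-patch16-224')
#
# Resolution order, as implemented in `from_pretrained` above: explicit image processor
# config -> legacy feature extractor config -> model config -> IMAGE_PROCESSOR_MAPPING.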
| 279 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        F""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights_wavaveca( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        elif name.split('.' )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
    return proj_weight
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"""{full_name} has size {value.shape}, but {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"""{full_name} has size {value.shape}, but"""
                F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb( emb ):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict( dict_path ):
    with open(dict_path , 'r' , encoding='utf-8' ) as f:
        lines = f.readlines()
        words = [line.split(' ' )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove('embed_out' )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , 'vocab.json' ) , 'w' ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , 'vocab.json' ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
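# Example invocation of this conversion script (sketch; every path below is a placeholder):
#
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-speech2text2-converted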
| 325 | 0 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class SamProcessorTest( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )
        processor.save_pretrained(self.tmpdirname )

    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features( self ):
        processor = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , SamImageProcessor )

    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        input_feat_extract.pop("""original_sizes""" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    @require_torch
    def test_post_process_masks( self ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_masks = [torch.ones((1, 3, 5, 5) )]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks , original_sizes , reshaped_input_size )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        masks = processor.post_process_masks(
            dummy_masks , torch.tensor(original_sizes ) , torch.tensor(reshaped_input_size ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5) )]
        masks = processor.post_process_masks(dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(ValueError ):
            masks = processor.post_process_masks(dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) )
@require_vision
@require_tf
class TFSamProcessorTest( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )
        processor.save_pretrained(self.tmpdirname )

    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features( self ):
        processor = SamProcessor(image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , SamImageProcessor )

    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_feat_extract = image_processor(image_input , return_tensors="""np""" )
        input_processor = processor(images=image_input , return_tensors="""np""" )
        input_feat_extract.pop("""original_sizes""" )  # pop original_sizes as it is popped in the processor
        input_feat_extract.pop("""reshaped_input_sizes""" )  # pop reshaped_input_sizes as it is popped in the processor
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )

    @require_tf
    def test_post_process_masks( self ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_masks = [tf.ones((1, 3, 5, 5) )]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        masks = processor.post_process_masks(dummy_masks , original_sizes , reshaped_input_size , return_tensors="""tf""" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        masks = processor.post_process_masks(
            dummy_masks , tf.convert_to_tensor(original_sizes ) , tf.convert_to_tensor(reshaped_input_size ) , return_tensors="""tf""" , )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        # should also work with np
        dummy_masks = [np.ones((1, 3, 5, 5) )]
        masks = processor.post_process_masks(
            dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) , return_tensors="""tf""" )
        self.assertEqual(masks[0].shape , (1, 3, 1764, 2646) )
        dummy_masks = [[1, 0], [0, 1]]
        with self.assertRaises(tf.errors.InvalidArgumentError ):
            masks = processor.post_process_masks(
                dummy_masks , np.array(original_sizes ) , np.array(reshaped_input_size ) , return_tensors="""tf""" )
@require_vision
@require_torchvision
class SamProcessorEquivalenceTest( unittest.TestCase ):
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        image_processor = SamImageProcessor()
        processor = SamProcessor(image_processor )
        processor.save_pretrained(self.tmpdirname )

    def get_image_processor( self , **kwargs ):
        return AutoProcessor.from_pretrained(self.tmpdirname , **kwargs ).image_processor

    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )

    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence( self ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        dummy_masks = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks )]
        pt_dummy_masks = [torch.tensor(dummy_masks )]
        original_sizes = [[1764, 2646]]
        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks , original_sizes , reshaped_input_size , return_tensors="""tf""" )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks , original_sizes , reshaped_input_size , return_tensors="""pt""" )
        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )

    @is_pt_tf_cross_test
    def test_image_processor_equivalence( self ):
        image_processor = self.get_image_processor()
        processor = SamProcessor(image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        pt_input_feat_extract = image_processor(image_input , return_tensors="""pt""" )["""pixel_values"""].numpy()
        pt_input_processor = processor(images=image_input , return_tensors="""pt""" )["""pixel_values"""].numpy()
        tf_input_feat_extract = image_processor(image_input , return_tensors="""tf""" )["""pixel_values"""].numpy()
        tf_input_processor = processor(images=image_input , return_tensors="""tf""" )["""pixel_values"""].numpy()
        self.assertTrue(np.allclose(pt_input_feat_extract , pt_input_processor ) )
        self.assertTrue(np.allclose(pt_input_feat_extract , tf_input_feat_extract ) )
        self.assertTrue(np.allclose(pt_input_feat_extract , tf_input_processor ) )
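# Note on the shapes asserted above: post_process_masks upsamples each low-resolution
# mask (here 5x5) back to the original image size, so a (1, 3, 5, 5) input paired with
# original_sizes=[[1764, 2646]] yields masks of shape (1, 3, 1764, 2646) in every framework.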
| 289 |
def binomial_coefficient( n: int , r: int ) -> int:
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
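# Worked example: the right-to-left inner loop updates one row of Pascal's triangle in
# place, so c[j] holds C(i, j) after the i-th outer pass and no extra row buffer is needed.
# binomial_coefficient(n=5, r=2) -> 10; binomial_coefficient(n=10, r=5) -> 252 (printed above).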
| 325 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''],
'''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_maskformer'''] = ['''MaskFormerFeatureExtractor''']
    _import_structure['''image_processing_maskformer'''] = ['''MaskFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_maskformer'''] = [
'''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MaskFormerForInstanceSegmentation''',
'''MaskFormerModel''',
'''MaskFormerPreTrainedModel''',
]
    _import_structure['''modeling_maskformer_swin'''] = [
'''MaskFormerSwinBackbone''',
'''MaskFormerSwinModel''',
'''MaskFormerSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
from .configuration_maskformer_swin import MaskFormerSwinConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_maskformer import MaskFormerFeatureExtractor
from .image_processing_maskformer import MaskFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskformer import (
MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskFormerForInstanceSegmentation,
MaskFormerModel,
MaskFormerPreTrainedModel,
)
from .modeling_maskformer_swin import (
MaskFormerSwinBackbone,
MaskFormerSwinModel,
MaskFormerSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
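# Usage sketch of the lazy module set up above (assumes the standard transformers layout):
#
#   from transformers.models.maskformer import MaskFormerConfig, MaskFormerModel
#
# The heavy submodules are only imported on first attribute access, via _LazyModule.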
| 313 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline( DiffusionPipeline ):
lowerCAmelCase__ : Union[str, Any] = ["vqvae"]
    def __init__( self , vqvae: AutoencoderKL , unet: UNetaDConditionModel , mel: Mel , scheduler: Union[DDIMScheduler, DDPMScheduler] , ):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler , mel=mel , vqvae=vqvae )
    def get_default_steps( self ) -> int:
        """simple docstring"""
        return 50 if isinstance(self.scheduler , DDIMScheduler ) else 10_00
@torch.no_grad()
    def __call__( self , batch_size: int = 1 , audio_file: str = None , raw_audio: np.ndarray = None , slice: int = 0 , start_step: int = 0 , steps: int = None , generator: torch.Generator = None , mask_start_secs: float = 0 , mask_end_secs: float = 0 , step_generator: torch.Generator = None , eta: float = 0 , noise: torch.Tensor = None , encoding: torch.Tensor = None , return_dict: bool = True , ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """simple docstring"""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps )
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size ) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=generator , device=self.device , )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file , raw_audio )
            input_image = self.mel.audio_slice_to_image(slice )
            input_image = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
                (input_image.height, input_image.width) )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images , 0 ) ).latent_dist.sample(
                    generator=generator )[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images , noise , self.scheduler.timesteps[start_step - 1] )
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second )
            mask_end = int(mask_end_secs * pixels_per_second )
            mask = self.scheduler.add_noise(input_images , noise , torch.tensor(self.scheduler.timesteps[start_step:] ) )
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
            if isinstance(self.unet , UNetaDConditionModel ):
                model_output = self.unet(images , t , encoding )['sample']
            else:
                model_output = self.unet(images , t )['sample']
            if isinstance(self.scheduler , DDIMScheduler ):
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , eta=eta , generator=step_generator , )['prev_sample']
            else:
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , generator=step_generator , )['prev_sample']
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images )['sample']
        images = (images / 2 + 0.5).clamp(0 , 1 )
        images = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        images = (images * 255).round().astype('uint8' )
        images = list(
            (Image.fromarray(_[:, :, 0] ) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode='RGB' ).convert('L' ) for _ in images) )
        audios = [self.mel.image_to_audio(_ ) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios )[:, np.newaxis, :] ) , **ImagePipelineOutput(images ) )
@torch.no_grad()
    def encode( self , images: List[Image.Image] , steps: int = 50 ) -> np.ndarray:
        """simple docstring"""
        assert isinstance(self.scheduler , DDIMScheduler )
        self.scheduler.set_timesteps(steps )
        sample = np.array(
            [np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample ).to(self.device )
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample , t )['sample']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
@staticmethod
    def slerp( xa: torch.Tensor , xb: torch.Tensor , alpha: float ) -> torch.Tensor:
        """simple docstring"""
        theta = acos(torch.dot(torch.flatten(xa ) , torch.flatten(xb ) ) / torch.norm(xa ) / torch.norm(xb ) )
        return sin((1 - alpha) * theta ) * xa / sin(theta ) + sin(alpha * theta ) * xb / sin(theta )
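# Usage sketch for the pipeline above (hedged; 'teticio/audio-diffusion-256' is one
# publicly available checkpoint trained for this pipeline, used here as an example):
#
#   pipe = AudioDiffusionPipeline.from_pretrained('teticio/audio-diffusion-256')
#   output = pipe(batch_size=1, steps=50)
#   images, audios = output.images, output.audios   # mel spectrogram images + waveforms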
| 325 | 0 |
import unittest
from transformers import (
MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TextGenerationPipeline,
logging,
pipeline,
)
from transformers.testing_utils import (
CaptureLogger,
is_pipeline_test,
require_accelerate,
require_tf,
require_torch,
require_torch_gpu,
require_torch_or_tf,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING
@require_torch
def UpperCAmelCase ( self ) -> int:
        text_generator = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''pt''' )
        # Using `do_sample=False` to force deterministic output
        UpperCamelCase :Union[str, Any] = text_generator('''This is a test''' , do_sample=False )
self.assertEqual(
_UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
] , )
UpperCamelCase :int = text_generator(['''This is a test''', '''This is a second test'''] )
self.assertEqual(
_UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope.'''
''' oscope. FiliFili@@'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy'''
''' oscope. oscope. FiliFili@@'''
)
}
],
] , )
UpperCamelCase :Any = text_generator('''This is a test''' , do_sample=_UpperCAmelCase , num_return_sequences=2 , return_tensors=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{'''generated_token_ids''': ANY(_UpperCAmelCase )},
{'''generated_token_ids''': ANY(_UpperCAmelCase )},
] , )
        text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id
        text_generator.tokenizer.pad_token = '''<pad>'''
UpperCamelCase :Dict = text_generator(
['''This is a test''', '''This is a second test'''] , do_sample=_UpperCAmelCase , num_return_sequences=2 , batch_size=2 , return_tensors=_UpperCAmelCase , )
self.assertEqual(
_UpperCAmelCase , [
[
{'''generated_token_ids''': ANY(_UpperCAmelCase )},
{'''generated_token_ids''': ANY(_UpperCAmelCase )},
],
[
{'''generated_token_ids''': ANY(_UpperCAmelCase )},
{'''generated_token_ids''': ANY(_UpperCAmelCase )},
],
] , )
@require_tf
def UpperCAmelCase ( self ) -> Any:
        text_generator = pipeline(task='''text-generation''' , model='''sshleifer/tiny-ctrl''' , framework='''tf''' )
        # Using `do_sample=False` to force deterministic output
        UpperCamelCase :Union[str, Any] = text_generator('''This is a test''' , do_sample=False )
self.assertEqual(
_UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
] , )
        UpperCamelCase :List[Any] = text_generator(['''This is a test''', '''This is a second test'''] , do_sample=False )
self.assertEqual(
_UpperCAmelCase , [
[
{
'''generated_text''': (
'''This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵'''
''' please,'''
)
}
],
[
{
'''generated_text''': (
'''This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes'''
''' Cannes 閲閲Cannes Cannes Cannes 攵 please,'''
)
}
],
] , )
    def UpperCAmelCase ( self , model , tokenizer , processor ) -> Any:
        text_generator = TextGenerationPipeline(model=model , tokenizer=tokenizer )
        return text_generator, ["This is a test", "Another test"]
def UpperCAmelCase ( self ) -> Optional[int]:
        prompt = '''Hello I believe in'''
        text_generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        output = text_generator(prompt )
        self.assertEqual(
            output , [{'''generated_text''': '''Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe'''}] , )
        output = text_generator(prompt , stop_sequence=''' fe''' )
        self.assertEqual(output , [{'''generated_text''': '''Hello I believe in fe'''}] )
    def UpperCAmelCase ( self , text_generator , _ ) -> List[str]:
        model = text_generator.model
        tokenizer = text_generator.tokenizer
UpperCamelCase :List[str] = text_generator('''This is a test''' )
self.assertEqual(_UpperCAmelCase , [{'''generated_text''': ANY(_UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
UpperCamelCase :Any = text_generator('''This is a test''' , return_full_text=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [{'''generated_text''': ANY(_UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
UpperCamelCase :Union[str, Any] = pipeline(task='''text-generation''' , model=_UpperCAmelCase , tokenizer=_UpperCAmelCase , return_full_text=_UpperCAmelCase )
UpperCamelCase :Optional[Any] = text_generator('''This is a test''' )
self.assertEqual(_UpperCAmelCase , [{'''generated_text''': ANY(_UpperCAmelCase )}] )
self.assertNotIn('''This is a test''' , outputs[0]['''generated_text'''] )
UpperCamelCase :int = text_generator('''This is a test''' , return_full_text=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , [{'''generated_text''': ANY(_UpperCAmelCase )}] )
self.assertTrue(outputs[0]['''generated_text'''].startswith('''This is a test''' ) )
UpperCamelCase :Dict = text_generator(['''This is great !''', '''Something else'''] , num_return_sequences=2 , do_sample=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
[{'''generated_text''': ANY(_UpperCAmelCase )}, {'''generated_text''': ANY(_UpperCAmelCase )}],
[{'''generated_text''': ANY(_UpperCAmelCase )}, {'''generated_text''': ANY(_UpperCAmelCase )}],
] , )
if text_generator.tokenizer.pad_token is not None:
UpperCamelCase :List[Any] = text_generator(
['''This is great !''', '''Something else'''] , num_return_sequences=2 , batch_size=2 , do_sample=_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
[{'''generated_text''': ANY(_UpperCAmelCase )}, {'''generated_text''': ANY(_UpperCAmelCase )}],
[{'''generated_text''': ANY(_UpperCAmelCase )}, {'''generated_text''': ANY(_UpperCAmelCase )}],
] , )
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase :Optional[Any] = text_generator('''test''' , return_full_text=_UpperCAmelCase , return_text=_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase :List[str] = text_generator('''test''' , return_full_text=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
with self.assertRaises(_UpperCAmelCase ):
UpperCamelCase :Dict = text_generator('''test''' , return_text=_UpperCAmelCase , return_tensors=_UpperCAmelCase )
        # Empty prompt is slightly special
# it requires BOS token to exist.
# Special case for Pegasus which will always append EOS so will
# work even without BOS.
if (
text_generator.tokenizer.bos_token_id is not None
or "Pegasus" in tokenizer.__class__.__name__
or "Git" in model.__class__.__name__
):
UpperCamelCase :List[Any] = text_generator('''''' )
self.assertEqual(_UpperCAmelCase , [{'''generated_text''': ANY(_UpperCAmelCase )}] )
else:
with self.assertRaises((ValueError, AssertionError) ):
UpperCamelCase :str = text_generator('''''' )
if text_generator.framework == "tf":
# TF generation does not support max_new_tokens, and it's impossible
# to control long generation with only max_length without
# fancy calculation, dismissing tests for now.
return
# We don't care about infinite range models.
# They already work.
# Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
UpperCamelCase :Any = ['''RwkvForCausalLM''', '''XGLMForCausalLM''', '''GPTNeoXForCausalLM''']
if (
tokenizer.model_max_length < 1_0000
and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
):
# Handling of large generations
with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
text_generator('''This is a test''' * 500 , max_new_tokens=20 )
UpperCamelCase :Optional[int] = text_generator('''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=20 )
# Hole strategy cannot work
with self.assertRaises(_UpperCAmelCase ):
text_generator(
'''This is a test''' * 500 , handle_long_generation='''hole''' , max_new_tokens=tokenizer.model_max_length + 10 , )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCAmelCase ( self ) -> Optional[int]:
import torch
# Classic `model_kwargs`
        pipe = pipeline(
            model='''hf-internal-testing/tiny-random-bloom''' , model_kwargs={'''device_map''': '''auto''', '''torch_dtype''': torch.bfloat16} , )
self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
UpperCamelCase :Tuple = pipe('''This is a test''' )
self.assertEqual(
_UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.bfloat16 )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloat16 )
UpperCamelCase :List[Any] = pipe('''This is a test''' )
self.assertEqual(
_UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
# torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.float32 )
UpperCamelCase :Any = pipe('''This is a test''' )
self.assertEqual(
_UpperCAmelCase , [
{
'''generated_text''': (
'''This is a test test test test test test test test test test test test test test test test'''
''' test'''
)
}
] , )
@require_torch
@require_torch_gpu
def UpperCAmelCase ( self ) -> Optional[Any]:
import torch
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device=0 , torch_dtype=torch.float16 )
        pipe('''This is a test''' )
@require_torch
@require_accelerate
@require_torch_gpu
def UpperCAmelCase ( self ) -> List[str]:
import torch
        pipe = pipeline(model='''hf-internal-testing/tiny-random-bloom''' , device_map='''auto''' , torch_dtype=torch.float16 )
        pipe('''This is a test''' , do_sample=True , top_p=0.5 )
def UpperCAmelCase ( self ) -> str:
        prompt = '''Hello world'''
        text_generator = pipeline('''text-generation''' , model='''hf-internal-testing/tiny-random-gpt2''' )
        if text_generator.model.framework == "tf":
            logger = logging.get_logger('''transformers.generation.tf_utils''' )
        else:
            logger = logging.get_logger('''transformers.generation.utils''' )
        logger_msg = '''Both `max_new_tokens`'''  # The beginning of the message to be checked in this test
        # Both are set by the user -> log warning
        with CaptureLogger(logger ) as cl:
            text_generator(prompt , max_length=10 , max_new_tokens=1 )
        self.assertIn(logger_msg , cl.out )
        # The user only sets one -> no warning
        with CaptureLogger(logger ) as cl:
            text_generator(prompt , max_new_tokens=1 )
        self.assertNotIn(logger_msg , cl.out )
        with CaptureLogger(logger ) as cl:
            text_generator(prompt , max_length=10 )
        self.assertNotIn(logger_msg , cl.out )
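# The behaviours tested above reduce to this basic usage (sketch):
#
#   from transformers import pipeline
#   generator = pipeline('text-generation', model='sshleifer/tiny-ctrl')
#   print(generator('This is a test', do_sample=False)[0]['generated_text'])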
| 259 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int , right: int , array: list[int] , target: int ) -> int:
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int] , target: int ) -> int:
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int , right: int , array: list[int] , target: int ) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = [int(item.strip()) for item in user_input.split(""",""")]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input("""Enter the number to be found in the list:\n""").strip())
    resulta = ite_ternary_search(collection, target)
    resultb = rec_ternary_search(0, len(collection) - 1, collection, target)
    if resulta != -1:
        print(F'''Iterative search: {target} found at position: {resulta}''')
        print(F'''Recursive search: {target} found at position: {resultb}''')
else:
print("""Not found""")
| 325 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase : Optional[int] = logging.get_logger(__name__)
_lowercase : str = {
'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json',
'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetVaConfig( PretrainedConfig ):
    model_type = "mobilenet_v1"
    def __init__( self , num_channels=3 , image_size=2_24 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.999 , initializer_range=0.02 , layer_norm_eps=0.001 , **kwargs , ):
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetVaOnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.11" )
@property
    def inputs( self ):
return OrderedDict([('''pixel_values''', {0: '''batch'''})] )
@property
    def outputs( self ):
if self.task == "image-classification":
return OrderedDict([('''logits''', {0: '''batch'''})] )
else:
return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] )
@property
def a ( self : str ):
return 1E-4
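# --- Added example (not part of the original file) ---------------------------
# A minimal usage sketch, assuming the class is exported as `MobileNetV1Config`
# as above; unknown kwargs are forwarded to `PretrainedConfig` via `**kwargs`.
#
#     config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
#     assert config.model_type == "mobilenet_v1"
#     config.save_pretrained("./mobilenet_v1_0.75_192")  # writes config.json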
| 332 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
    class LoRALayer(nn.Module):
        """Wraps a linear layer with a LoRA-like adapter - used for testing purposes only."""

        def __init__(self, module: nn.Module, rank: int):
            super().__init__()
            self.module = module
            self.adapter = nn.Sequential(
                nn.Linear(module.in_features, rank, bias=False), nn.Linear(rank, module.out_features, bias=False), )
            small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
            nn.init.normal_(self.adapter[0].weight, std=small_std)
            nn.init.zeros_(self.adapter[1].weight)
            self.adapter.to(module.weight.device)

        def forward(self, input, *args, **kwargs):
            return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
    model_name = "bigscience/bloom-1b7"

    # Constant values
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574
    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10
    def setUp(self):
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.floataa, device_map="auto")
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
    def tearDown(self):
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
    def test_quantization_config_json_serialization(self):
        config = self.model_abit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()

        _ = config.to_json_string()
    def test_memory_footprint(self):
        from bitsandbytes.nn import Paramsabit

        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()

        self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Paramsabit)
    def test_linear_are_abit(self):
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()

        for name, module in self.model_abit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta)
    def test_generate_quality(self):
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_generate_quality_config(self):
        quantization_config = BitsAndBytesConfig()
        quantization_config.load_in_abit = True

        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=quantization_config, device_map="auto")

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
    def test_raise_on_save_pretrained(self):
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname)
    def test_raise_if_config_and_load_in_abit(self):
        quantization_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=quantization_config,
                load_in_abit=True,
                device_map="auto",
                bnb_abit_quant_type="nf4",
            )
    def test_device_and_dtype_assignment(self):
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_abit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype`
            self.model_abit.to(torch.floataa)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fpaa = self.model_fpaa.to(torch.floataa)
        _ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fpaa.to("cpu")

        # Check this does not throw an error
        _ = self.model_fpaa.half()

        # Check this does not throw an error
        _ = self.model_fpaa.float()
    def test_fpaa_abit_conversion(self):
        model = AutoModelForSeqaSeqLM.from_pretrained("t5-small", load_in_abit=True, device_map="auto")
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
@classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"
    def tearDown(self):
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
    def test_inference_without_keep_in_fpaa(self):
        from transformers import TaForConditionalGeneration

        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        TaForConditionalGeneration._keep_in_fpaa_modules = modules
    def test_inference_with_keep_in_fpaa(self):
        import bitsandbytes as bnb

        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linearabit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    def setUp(self):
"""simple docstring"""
super().setUp()
# model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_abit=True, device_map="auto")
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_abit=True, device_map="auto")
    def tearDown(self):
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
    def test_correct_head_class(self):
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class Pipeline4BitTest(Base4bitTest):
    def setUp(self):
"""simple docstring"""
super().setUp()
    def tearDown(self):
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
    def test_pipeline(self):
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    def setUp(self):
"""simple docstring"""
super().setUp()
    def test_multi_gpu_loading(self):
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_abit=True, device_map="balanced")

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()
    def test_training(self):
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True)

        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
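# --- Added example (not part of the original file) ---------------------------
# A minimal sketch of the public 4-bit loading path these tests exercise,
# using the real `transformers`/`bitsandbytes` API (requires a CUDA GPU):
#
#     from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
#     quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
#     model = AutoModelForCausalLM.from_pretrained(
#         "bigscience/bloom-1b7", quantization_config=quantization_config, device_map="auto"
#     )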
| 325 | 0 |
from timeit import timeit


def get_set_bits_count_using_brian_kernighans_algorithm(number: int) -> int:
    """
    Count the number of set bits in an integer using Brian Kernighan's trick.
    >>> get_set_bits_count_using_brian_kernighans_algorithm(25)
    3
    >>> get_set_bits_count_using_brian_kernighans_algorithm(0)
    0
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result


def get_set_bits_count_using_modulo_operator(number: int) -> int:
    """
    Count the number of set bits in an integer by testing each bit in turn.
    >>> get_set_bits_count_using_modulo_operator(25)
    3
    >>> get_set_bits_count_using_modulo_operator(0)
    0
    """
    if number < 0:
        raise ValueError("the value of input must not be negative")
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result


def benchmark() -> None:
    """Benchmark the two implementations against each other with timeit."""

    def do_benchmark(number: int) -> None:
        setup = "import __main__ as z"
        print(f"Benchmark when {number = }:")
        print(f"{get_set_bits_count_using_modulo_operator(number) = }")
        timing = timeit("z.get_set_bits_count_using_modulo_operator(25)", setup=setup)
        print(f"timeit() runs in {timing} seconds")
        print(f"{get_set_bits_count_using_brian_kernighans_algorithm(number) = }")
        timing = timeit(
            "z.get_set_bits_count_using_brian_kernighans_algorithm(25)",
            setup=setup,
        )
        print(f"timeit() runs in {timing} seconds")

    for number in (25, 37, 58, 0):
        do_benchmark(number)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
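# --- Added example (not part of the original file) ---------------------------
# Why Brian Kernighan's trick works: `number & (number - 1)` clears the lowest
# set bit, so the loop runs once per *set bit* rather than once per bit
# position. Worked example for 25 (0b11001):
#
#     25 & 24 -> 0b11001 & 0b11000 = 0b11000 (24)   # 1st set bit cleared
#     24 & 23 -> 0b11000 & 0b10111 = 0b10000 (16)   # 2nd
#     16 & 15 -> 0b10000 & 0b01111 = 0b00000 (0)    # 3rd -> count = 3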
| 158 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # NOTE: the tester deliberately ignores most arguments and pins the values below.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }

        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size], )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length], )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length], )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03_475_493, -0.4_686_034, -0.30_638_832],
                    [0.22_637_248, -0.26_988_646, -0.7_423_424],
                    [0.10_324_868, -0.45_013_508, -0.58_280_784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
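# --- Added example (not part of the original file) ---------------------------
# A minimal sketch of loading the same checkpoint the integration test uses;
# this relies only on the public transformers API:
#
#     from transformers import AutoTokenizer, TFConvBertModel
#
#     tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
#     model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#     outputs = model(tokenizer("Hello world", return_tensors="tf"))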
| 325 | 0 |
"""simple docstring"""
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _lowerCAmelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def _lowercase ( self : List[Any], UpperCAmelCase__ : str ):
with open(_UpperCAmelCase, encoding="utf-8" ) as input_file:
__lowercase = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
__lowercase = input_file.read()
__lowercase = regexp.search(_UpperCAmelCase )
return match
def _lowercase ( self : Tuple, UpperCAmelCase__ : str ):
with open(_UpperCAmelCase, encoding="utf-8" ) as input_file:
__lowercase = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL )
__lowercase = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
__lowercase = regexp.finditer(_UpperCAmelCase )
__lowercase = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def _lowercase ( self : Union[str, Any] ):
__lowercase = Path("./datasets" )
__lowercase = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(_UpperCAmelCase ) ):
raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""" )
def _lowercase ( self : str ):
__lowercase = Path("./datasets" )
__lowercase = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(_UpperCAmelCase ) ):
raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""" )
| 17 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the wrapped optimizer(s) actually stepped,
    and that compensates for the batch-size multiplication done across processes.
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
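# --- Added example (not part of the original file) ---------------------------
# A minimal sketch of wrapping a PyTorch scheduler by hand; normally
# `accelerator.prepare(...)` builds this wrapper for you, and the accelerate
# state must be initialized before `step()` is called.
#
#     import torch
#
#     model = torch.nn.Linear(2, 2)
#     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#     lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#     scheduler = AcceleratedScheduler(lr_scheduler, optimizer, step_with_optimizer=True)
#     scheduler.step()  # only advances when the wrapped optimizer actually stepped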
| 325 | 0 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline(DiffusionPipeline):
    """Pipeline for audio diffusion: generates mel spectrogram images and converts them back to audio."""

    _optional_components = ["vqvae"]

    def __init__(self, vqvae, unet, mel: Mel, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self):
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size=1,
        audio_file=None,
        raw_audio=None,
        slice=0,
        start_step=0,
        steps=None,
        generator=None,
        mask_start_secs=0,
        mask_end_secs=0,
        step_generator=None,
        eta=0,
        noise=None,
        encoding=None,
        return_dict=True,
    ):
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ), generator=generator, device=self.device, )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, eta=eta, generator=step_generator, )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output, timestep=t, sample=images, generator=step_generator, )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images))

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images, steps=50):
        # Reverse-DDIM: only a deterministic scheduler can invert images back to noise.
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0, x1, alpha):
        """Spherical linear interpolation between two flattened tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
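# --- Added example (not part of the original file) ---------------------------
# slerp() interpolates along the great circle between two flattened tensors:
# at alpha=0 it returns x0 and at alpha=1 it returns x1 (up to numerical
# error). A small self-contained sketch:
#
#     import torch
#
#     x0, x1 = torch.randn(16), torch.randn(16)
#     mid = AudioDiffusionPipeline.slerp(x0, x1, 0.5)
#     assert torch.allclose(AudioDiffusionPipeline.slerp(x0, x1, 0.0), x0, atol=1e-5)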
| 195 |
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE__ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    """Analyze the differences between _import_structure objects and TYPE_CHECKING objects found in an init."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    """Check all inits in the transformers repo and raise an error if at least one does not define the same objects in
    both halves."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]

    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.")
if __name__ == "__main__":
check_all_inits()
check_submodules()
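# --- Added example (not part of the original file) ---------------------------
# A minimal sketch of the lazy-init layout this script validates: the
# `_import_structure` dict and the TYPE_CHECKING block must list the same
# objects per backend, which is exactly what analyze_results() compares.
# `FooConfig`/`FooModel` below are hypothetical names used for illustration.
#
#     _import_structure = {"configuration_foo": ["FooConfig"]}
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         pass
#     else:
#         _import_structure["modeling_foo"] = ["FooModel"]
#
#     if TYPE_CHECKING:
#         from .configuration_foo import FooConfig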
| 325 | 0 |
def bubble_sort(list_data: list, length: int = 0) -> list:
    """
    Sort `list_data` in place with a recursive bubble sort.
    >>> bubble_sort([0, 5, 2, 3, 2])
    [0, 2, 2, 3, 5]
    >>> bubble_sort([], 0)
    []
    """
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
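# --- Added example (not part of the original file) ---------------------------
# The `swapped` flag gives early termination: on an already-sorted list the
# first pass performs no swaps and the recursion stops after one pass, so the
# best case is O(n) while the worst case stays O(n^2).
#
#     assert bubble_sort([1, 2, 3]) == [1, 2, 3]   # single pass, no recursion
#     assert bubble_sort([3, 2, 1]) == [1, 2, 3]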
if __name__ == "__main__":
import doctest
    doctest.testmod()
 | 282 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess.

    `log` takes in an additional `main_process_only` kwarg, which dictates whether it should be called on all
    processes or only the main executed one. Default is `main_process_only=True`.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        """
        Delegates logger call after checking if we should log.
        """
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.")
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """
    Returns a `logging.Logger` for `name` that can handle multiprocessing.
    """
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
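# --- Added example (not part of the original file) ---------------------------
# Typical usage inside a script launched with `accelerate launch`; the
# accelerate state must exist before the first log call:
#
#     from accelerate import Accelerator
#
#     accelerator = Accelerator()
#     logger = get_logger(__name__, log_level="INFO")
#     logger.info("printed once, on the main process")
#     logger.info("printed on every process", main_process_only=False)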
| 325 | 0 |
import d4rl  # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
config = {
    "n_samples": 64,
    "horizon": 32,
    "num_inference_steps": 20,
    "n_guide_steps": 2,  # can set to 0 for faster sampling, does not use value network
    "scale_grad_by_std": True,
    "scale": 0.1,
    "eta": 0.0,
    "t_grad_cutoff": 2,
    "device": "cpu",
}

if __name__ == "__main__":
    env_name = "hopper-medium-v2"
    env = gym.make(env_name)

    pipeline = ValueGuidedRLPipeline.from_pretrained(
        "bglick13/hopper-medium-v2-value-function-hor32",
        env=env,
    )

    env.seed(0)
    obs = env.reset()
    total_reward = 0
    total_score = 0
    T = 1000
    rollout = [obs.copy()]
    try:
        for t in tqdm.tqdm(range(T)):
            # call the policy
            denorm_actions = pipeline(obs, planning_horizon=32)

            # execute action in environment
            next_observation, reward, terminal, _ = env.step(denorm_actions)
            score = env.get_normalized_score(total_reward)

            # update return
            total_reward += reward
            total_score += score
            print(
                f"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:"
                f" {total_score}"
            )

            # save observations for rendering
            rollout.append(next_observation.copy())

            obs = next_observation
    except KeyboardInterrupt:
        pass

    print(f"Total reward: {total_reward}")
| 340 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name):
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    use_conv_embed = True if "large" in model_name or "huge" in model_name else False
    use_post_layernorm = True if "large" in model_name or "huge" in model_name else False
    use_layerscale = True if "large" in model_name or "huge" in model_name else False

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]

    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352

    # set label information
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=id2label,
        label2id=label2id,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
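# Worked example (traced by hand through the branches above):
# rename_key("layers.0.blocks.0.modulation.f.weight")
#   -> "focalnet.encoder.stages.0.layers.0.modulation.projection_in.weight"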
def convert_focalnet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    # fmt: off
    model_name_to_url = {
        "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth",
        "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth",
        "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth",
        "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth",
        "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth",
        "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth",
        "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth",
        "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth",
        "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth",
        "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth",
    }
    # fmt: on

    checkpoint_url = model_name_to_url[model_name]
    print("Checkpoint URL: ", checkpoint_url)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val

    config = get_focalnet_config(model_name)
    model = FocalNetForImageClassification(config)
    model.eval()

    # load state dict
    model.load_state_dict(state_dict)

    # verify conversion
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": 256},
        resample=PILImageResampling.BILINEAR,
        do_center_crop=True,
        crop_size=224,
        do_normalize=True,
        image_mean=IMAGENET_DEFAULT_MEAN,
        image_std=IMAGENET_DEFAULT_STD,
    )
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = processor(images=image, return_tensors="pt")

    image_transforms = transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )

    original_pixel_values = image_transforms(image).unsqueeze(0)

    # verify pixel_values
    assert torch.allclose(inputs.pixel_values, original_pixel_values, atol=1e-4)

    outputs = model(**inputs)
    predicted_class_idx = outputs.logits.argmax(-1).item()
    print("Predicted class:", model.config.id2label[predicted_class_idx])
    print("First values of logits:", outputs.logits[0, :3])

    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191])
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695])
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341])
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331])
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730])
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928])
    assert torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor of {model_name} to the hub...")
        model.push_to_hub(f"{model_name}")
        processor.push_to_hub(f"{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="focalnet-tiny",
        type=str,
        help="Name of the FocalNet model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub.",
    )

    args = parser.parse_args()
    convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 325 | 0 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock"):
        nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x):
    x = re.sub("<n>", "", x)  # remove pegasus newline char (the original discarded this result)
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
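# Usage sketch (illustrative; assumes the punkt tokenizer was downloaded above):
# add_newline_to_end_of_each_sentence("Hello world. How are you?")
#   -> "Hello world.\nHow are you?"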
| 327 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/mask2former-swin-small-coco-instance": (
        "https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"
    )
    # See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}

logger = logging.get_logger(__name__)
class Mask2FormerConfig(PretrainedConfig):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}

    def __init__(
        self,
        backbone_config: Optional[Dict] = None,
        feature_size: int = 256,
        mask_feature_size: int = 256,
        hidden_dim: int = 256,
        encoder_feedforward_dim: int = 1024,
        activation_function: str = "relu",
        encoder_layers: int = 6,
        decoder_layers: int = 10,
        num_attention_heads: int = 8,
        dropout: float = 0.0,
        dim_feedforward: int = 2048,
        pre_norm: bool = False,
        enforce_input_projection: bool = False,
        common_stride: int = 4,
        ignore_value: int = 255,
        num_queries: int = 100,
        no_object_weight: float = 0.1,
        class_weight: float = 2.0,
        mask_weight: float = 5.0,
        dice_weight: float = 5.0,
        train_num_points: int = 12544,
        oversample_ratio: float = 3.0,
        importance_sample_ratio: float = 0.75,
        init_std: float = 0.02,
        init_xavier_std: float = 1.0,
        use_auxiliary_loss: bool = True,
        feature_strides: List[int] = [4, 8, 16, 32],
        output_auxiliary_logits: bool = None,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.")
            backbone_config = CONFIG_MAPPING["swin"](
                image_size=224,
                in_channels=3,
                patch_size=4,
                embed_dim=96,
                depths=[2, 2, 18, 2],
                num_heads=[3, 6, 12, 24],
                window_size=7,
                drop_path_rate=0.3,
                use_absolute_embeddings=False,
                out_features=["stage1", "stage2", "stage3", "stage4"],
            )

        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        # verify that the backbone is supported
        if backbone_config.model_type not in self.backbones_supported:
            logger.warning_once(
                f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. "
                f"Supported model types: {','.join(self.backbones_supported)}"
            )

        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers

        super().__init__(**kwargs)

    @classmethod
    def from_backbone_config(cls, backbone_config: PretrainedConfig, **kwargs):
        return cls(
            backbone_config=backbone_config,
            **kwargs,
        )

    def to_dict(self) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
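# Minimal usage sketch (my illustration, not part of the original module):
# config = Mask2FormerConfig()   # falls back to the default Swin backbone above
# config.hidden_size             # -> 256, via the {"hidden_size": "hidden_dim"} attribute_map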
| 325 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
    # See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}


class ViTMSNConfig(PretrainedConfig):
    model_type = "vit_msn"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
| 279 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help=(
            f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will "
            "download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--checkpoint_name",
        default=None,
        type=str,
        help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
    )
    parser.add_argument(
        "--force_download",
        action="store_true",
        help="Re-download checkpoints.",
    )
    args = parser.parse_args()

    convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 325 | 0 |
"""simple docstring"""
def jaccard_similarity(set_a, set_b, alternative_union=False):
    """Jaccard similarity: |intersection| / |union| of two sets (or lists/tuples)."""
    if isinstance(set_a, set) and isinstance(set_b, set):
        intersection = len(set_a.intersection(set_b))

        if alternative_union:
            union = len(set_a) + len(set_b)
        else:
            union = len(set_a.union(set_b))

        return intersection / union

    if isinstance(set_a, (list, tuple)) and isinstance(set_b, (list, tuple)):
        intersection = [element for element in set_a if element in set_b]

        if alternative_union:
            union = len(set_a) + len(set_b)
            return len(intersection) / union
        else:
            union = set_a + [element for element in set_b if element not in set_a]
            return len(intersection) / len(union)

    return None
if __name__ == "__main__":
    set_a = {"a", "b", "c", "d", "e"}
    set_b = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
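    # 3 shared elements ("c", "d", "e") over 8 distinct elements in the union -> 0.375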
| 289 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Sieve of Eratosthenes: return all primes below max_number."""
    is_prime = [True] * max_number

    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count
if __name__ == "__main__":
print(F'''{solution() = }''')
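# Reading of the two-pointer pass above (hedged — the file carries no problem
# statement): it counts pairs of distinct primes (p, q) with p**q * q**p <= base**degree,
# compared in log2-space, which matches Project Euler problem 800.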
| 325 | 0 |
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."

# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)


_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")


def get_indent(code):
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""


def blackify(code):
    """Applies the black part of our `make style` command to `code`."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences, or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs


def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

check_copies(args.fix_and_overwrite)
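# Shape of the marker this script enforces (illustrative example matching the regexes above):
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
# The optional "with A->B" suffix rewrites names in the copied reference before comparing,
# and "all-casing" additionally applies the substitution in lower- and upper-case.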
| 313 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import Speech2TextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = Speech2TextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]

        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)

    def test_full_tokenizer(self):
        tokenizer = Speech2TextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
        # fmt: on

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        # NOTE: the long reference encoding below keeps its machine-generated name
        # (`__lowercase`); it is aliased to `expected_encoding` after the literal.
__lowercase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        expected_encoding = __lowercase  # alias for the verbatim reference dict above
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/s2t-small-mustc-en-de-st",
            revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad",
        )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"

    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: Speech2TextTokenizer = Speech2TextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls

    def check_language_codes(self):
        self.assertEqual(self.tokenizer.lang_code_to_id["pt"], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id["ru"], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id["it"], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id["de"], 11)

    def test_vocab_size(self):
        self.assertEqual(self.tokenizer.vocab_size, 10000)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)

    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 325 | 0 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(f'''{solution() = }''')
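# Reading (hedged — the file carries no problem statement): via Euclid's formula,
# every coprime pair (m, n) with m > n of opposite parity yields a primitive right
# triangle of perimeter 2m(m + n); counting perimeters realised by exactly one
# triangle up to 1,500,000 matches Project Euler problem 75.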
| 259 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/layoutlmv3-base": "https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json",
}
class LayoutLMv3Config(PretrainedConfig):
    model_type = "layoutlmv3"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_2d_position_embeddings=1024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_2d_pos_bins=64,
        max_rel_2d_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout


class LayoutLMv3OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.12")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # The order of inputs differs between question answering and sequence classification
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ]
            )
        else:
            return OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "sequence"}),
                    ("bbox", {0: "batch", 1: "sequence"}),
                    ("attention_mask", {0: "batch", 1: "sequence"}),
                    ("pixel_values", {0: "batch", 1: "num_channels"}),
                ]
            )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        setattr(processor.image_processor, "apply_ocr", False)

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )
        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[" ".join([processor.tokenizer.unk_token]) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)

        inputs = dict(
            processor(
                dummy_image,
                text=dummy_text,
                boxes=dummy_bboxes,
                return_tensors=framework,
            )
        )

        return inputs
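# Export sketch (my illustration; the exact CLI invocation is an assumption):
#   python -m transformers.onnx --model=microsoft/layoutlmv3-base --feature=sequence-classification onnx/
# generate_dummy_inputs() above supplies the tracing inputs (text, boxes, pixel_values).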
| 325 | 0 |
"""simple docstring"""
def split(string: str, separator: str = " ") -> list:
    split_words = []

    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words
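# Illustrative call (hand-checked against the loop above):
# split("apple#banana#cherry#orange", separator="#")
#   -> ['apple', 'banana', 'cherry', 'orange']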
if __name__ == "__main__":
from doctest import testmod
testmod()
| 332 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetEmbeddings(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state


class RegNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state


class RegNetSELayer(nn.Module):
    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state


class RegNetXLayer(nn.Module):
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetYLayer(nn.Module):
    # Same as the X layer, with a squeeze-and-excitation block after the grouped convolution.
    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state


class RegNetStage(nn.Module):
    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(
                config,
                in_channels,
                out_channels,
                stride=stride,
            ),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state


class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)


class RegNetPreTrainedModel(PreTrainedModel):
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConvNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )


@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
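# Minimal inference sketch (my addition, not part of the modeling file; assumes the
# checkpoint named in _IMAGE_CLASS_CHECKPOINT is available on the Hub):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="pt")
#   predicted_class = model(**inputs).logits.argmax(-1)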
| 325 | 0 |
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending any leftover tail."""
    first_str_length = len(first_str)
    second_str_length = len(second_str)
    abs_length = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
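    # prints: AXBYZ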
| 158 |
from __future__ import annotations
def min_path_sum(matrix: list[list[int]]) -> int:
    # preprocessing the first row
    for i in range(1, len(matrix[0])):
        matrix[0][i] += matrix[0][i - 1]

    # preprocessing the first column
    for i in range(1, len(matrix)):
        matrix[i][0] += matrix[i - 1][0]

    # updating the path cost for current position
    for i in range(1, len(matrix)):
        for j in range(1, len(matrix[0])):
            matrix[i][j] += min(matrix[i - 1][j], matrix[i][j - 1])

    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
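    # Illustrative check, not part of the original file: with only right/down
    # moves allowed, the cheapest path through [[1, 2], [3, 4]] is 1 -> 2 -> 4 = 7.
    # Note that the DP mutates its input in place.
    assert minimum_cost_path([[1, 2], [3, 4]]) == 7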
| 325 | 0 |
"""simple docstring"""
from math import pi
def arc_length(angle: int, radius: int) -> float:
    '''simple docstring'''
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
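    # Sanity check, not part of the original file: a quarter circle of radius
    # 10 has arc length 2 * pi * 10 / 4 = 5 * pi (about 15.71).
    assert abs(arc_length(90, 10) - 5 * pi) < 1e-9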
| 17 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode(enum.Enum):
    ALL_CHECKS = "all_checks"
    BASIC_CHECKS = "basic_checks"
    NO_CHECKS = "no_checks"


class ChecksumVerificationException(Exception):
    pass


class UnexpectedDownloadedFile(ChecksumVerificationException):
    pass


class ExpectedMoreDownloadedFiles(ChecksumVerificationException):
    pass


class NonMatchingChecksumError(ChecksumVerificationException):
    pass
def verify_checksums(expected_checksums: Optional[dict], recorded_checksums: dict, verification_name=None) -> None:
    if expected_checksums is None:
        logger.info('Unable to verify checksums.')
        return
    if len(set(expected_checksums) - set(recorded_checksums)) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums) - set(recorded_checksums)))
    if len(set(recorded_checksums) - set(expected_checksums)) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums) - set(expected_checksums)))
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ' for ' + verification_name if verification_name is not None else ''
    if len(bad_urls) > 0:
        raise NonMatchingChecksumError(
            f"""Checksums didn't match{for_verification_name}:\n"""
            f"""{bad_urls}\n"""
            'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
    logger.info('All the checksums matched successfully' + for_verification_name )
class SplitsVerificationException(Exception):
    pass


class UnexpectedSplits(SplitsVerificationException):
    pass


class ExpectedMoreSplits(SplitsVerificationException):
    pass


class NonMatchingSplitsSizesError(SplitsVerificationException):
    pass
def verify_splits(expected_splits: Optional[dict], recorded_splits: dict) -> None:
    if expected_splits is None:
        logger.info('Unable to verify splits sizes.')
        return
    if len(set(expected_splits) - set(recorded_splits)) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits) - set(recorded_splits)))
    if len(set(recorded_splits) - set(expected_splits)) > 0:
        raise UnexpectedSplits(str(set(recorded_splits) - set(expected_splits)))
    bad_splits = [
        {'expected': expected_splits[name], 'recorded': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits))
    logger.info('All the splits matched successfully.')
def get_size_checksum_dict(path: str, record_checksum: bool = True) -> dict:
    if record_checksum:
        m = sha256()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 20), b''):
                m.update(chunk)
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path), "checksum": checksum}


def is_small_dataset(dataset_size: Optional[int]) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
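

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original file: record the
    # size/checksum of a throwaway file, then verify it against an identical
    # "expected" dict (so the verification passes). The URL key is made up.
    import tempfile

    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as tmp:
        tmp.write("hello")
        path = tmp.name
    recorded = {"https://example.com/data.txt": get_size_checksum_dict(path)}
    expected = dict(recorded)  # in real use this comes from the dataset's metadata
    verify_checksums(expected, recorded, verification_name="example")
    os.remove(path)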
| 325 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/mbart-large-en-ro''': (
'''https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'''
),
'''facebook/mbart-large-cc25''': (
'''https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'''
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/mbart-large-en-ro''': 1024,
'''facebook/mbart-large-cc25''': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
def __init__( self , snake_case , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=None , snake_case=None , snake_case=None , snake_case = None , snake_case=None , **snake_case , ):
lowercase = AddedToken(_UpperCAmelCase , lstrip=_UpperCAmelCase , rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase , _UpperCAmelCase ) else mask_token
lowercase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , src_lang=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
lowercase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase = 1
lowercase = len(self.sp_model )
lowercase = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(_UpperCAmelCase )
}
lowercase = {v: k for k, v in self.lang_code_to_id.items()}
lowercase = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase = src_lang if src_lang is not None else 'en_XX'
lowercase = self.lang_code_to_id[self._src_lang]
lowercase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
lowercase = self.__dict__.copy()
lowercase = None
lowercase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , snake_case ):
lowercase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowercase = {}
lowercase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return self._src_lang
@src_lang.setter
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None , snake_case = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
lowercase = [1] * len(self.prefix_tokens )
lowercase = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
lowercase = [self.sep_token_id]
lowercase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case , snake_case , snake_case , **snake_case ):
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowercase = src_lang
lowercase = self(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase )
lowercase = self.convert_tokens_to_ids(_UpperCAmelCase )
lowercase = tgt_lang_id
return inputs
def SCREAMING_SNAKE_CASE__ ( self ):
lowercase = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase = self.sp_model.PieceToId(_UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = ''.join(_UpperCAmelCase ).replace(_UpperCAmelCase , ' ' ).strip()
return out_string
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = None ):
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase = os.path.join(
_UpperCAmelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
lowercase = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE__ ( self , snake_case , snake_case = "en_XX" , snake_case = None , snake_case = "ro_RO" , **snake_case , ):
lowercase = src_lang
lowercase = tgt_lang
return super().prepare_seqaseq_batch(_UpperCAmelCase , _UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
return self.set_src_lang_special_tokens(self.src_lang )
def SCREAMING_SNAKE_CASE__ ( self ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = self.lang_code_to_id[src_lang]
lowercase = []
lowercase = [self.eos_token_id, self.cur_lang_code]
def SCREAMING_SNAKE_CASE__ ( self , snake_case ):
lowercase = self.lang_code_to_id[lang]
lowercase = []
lowercase = [self.eos_token_id, self.cur_lang_code]
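

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original file. It uses the
    # released library class and the real facebook/mbart-large-en-ro checkpoint
    # (network access required); the example sentence is made up. The point is
    # that src_lang/tgt_lang select the language-code special tokens produced by
    # the special-token logic above.
    from transformers import MBartTokenizer as HubMBartTokenizer

    tok = HubMBartTokenizer.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    enc = tok("UN Chief Says There Is No Military Solution in Syria")
    print(enc["input_ids"][-2:])  # last two ids: </s> followed by the en_XX language code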
| 195 |
import math
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value
    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
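

if __name__ == "__main__":
    # Quick demonstration, not part of the original file.
    assert is_prime(13) and not is_prime(15)
    assert next_prime(14) == 17  # 14 -> 15 -> 16 -> 17
    assert next_prime(13) == 17  # 13 is already prime, so the *next* prime is returned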
| 325 | 0 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}
    logger.info(f"Loading tokenizer classes: {tokenizer_names}")
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")
        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)
            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None
                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")
            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")
            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.'''
)
parser.add_argument(
'''--tokenizer_name''',
default=None,
type=str,
help=(
F'Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '
'''download and convert all the checkpoints from AWS.'''
),
)
parser.add_argument(
'''--checkpoint_name''',
default=None,
type=str,
help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''',
)
parser.add_argument(
'''--force_download''',
action='''store_true''',
help='''Re-download checkpoints.''',
)
_lowerCamelCase : List[str] = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download) | 282 |
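    # Example invocation (illustrative, not part of the original file); the
    # script filename and output directory below are hypothetical:
    #   python convert_slow_tokenizers_checkpoints_to_fast.py \
    #       --tokenizer_name BertTokenizer \
    #       --checkpoint_name bert-base-uncased \
    #       --dump_path ./fast_tokenizers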
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
        __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [torch.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(_UpperCAmelCase ):
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
        __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [tf.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
@require_vision
@require_torchvision
class A__ ( unittest.TestCase ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
        __lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
        __lowercase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.float32 )
__lowercase = [tf.convert_to_tensor(_UpperCAmelCase )]
__lowercase = [torch.tensor(_UpperCAmelCase )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
__lowercase = processor(images=_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
__lowercase = processor(images=_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
| 325 | 0 |
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
logger = logging.get_logger(__name__)

FLAX_MODEL_MAPPING_NAMES = OrderedDict(
[
# Base model mapping
('''albert''', '''FlaxAlbertModel'''),
('''bart''', '''FlaxBartModel'''),
('''beit''', '''FlaxBeitModel'''),
('''bert''', '''FlaxBertModel'''),
('''big_bird''', '''FlaxBigBirdModel'''),
('''blenderbot''', '''FlaxBlenderbotModel'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
('''clip''', '''FlaxCLIPModel'''),
('''distilbert''', '''FlaxDistilBertModel'''),
('''electra''', '''FlaxElectraModel'''),
('''gpt-sw3''', '''FlaxGPT2Model'''),
('''gpt2''', '''FlaxGPT2Model'''),
('''gpt_neo''', '''FlaxGPTNeoModel'''),
('''gptj''', '''FlaxGPTJModel'''),
('''longt5''', '''FlaxLongT5Model'''),
('''marian''', '''FlaxMarianModel'''),
('''mbart''', '''FlaxMBartModel'''),
('''mt5''', '''FlaxMT5Model'''),
('''opt''', '''FlaxOPTModel'''),
('''pegasus''', '''FlaxPegasusModel'''),
('''regnet''', '''FlaxRegNetModel'''),
('''resnet''', '''FlaxResNetModel'''),
('''roberta''', '''FlaxRobertaModel'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
('''roformer''', '''FlaxRoFormerModel'''),
('''t5''', '''FlaxT5Model'''),
('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
('''vit''', '''FlaxViTModel'''),
('''wav2vec2''', '''FlaxWav2Vec2Model'''),
('''whisper''', '''FlaxWhisperModel'''),
('''xglm''', '''FlaxXGLMModel'''),
('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
]
)
FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES = OrderedDict(
[
# Model for pre-training mapping
('''albert''', '''FlaxAlbertForPreTraining'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForPreTraining'''),
('''big_bird''', '''FlaxBigBirdForPreTraining'''),
('''electra''', '''FlaxElectraForPreTraining'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Masked LM mapping
('''albert''', '''FlaxAlbertForMaskedLM'''),
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''bert''', '''FlaxBertForMaskedLM'''),
('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
('''electra''', '''FlaxElectraForMaskedLM'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''roberta''', '''FlaxRobertaForMaskedLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
('''roformer''', '''FlaxRoFormerForMaskedLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
]
)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('''bart''', '''FlaxBartForConditionalGeneration'''),
('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
('''marian''', '''FlaxMarianMTModel'''),
('''mbart''', '''FlaxMBartForConditionalGeneration'''),
('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
('''t5''', '''FlaxT5ForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Image-classsification
('''beit''', '''FlaxBeitForImageClassification'''),
('''regnet''', '''FlaxRegNetForImageClassification'''),
('''resnet''', '''FlaxResNetForImageClassification'''),
('''vit''', '''FlaxViTForImageClassification'''),
]
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
]
)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES = OrderedDict(
[
# Model for Causal LM mapping
('''bart''', '''FlaxBartForCausalLM'''),
('''bert''', '''FlaxBertForCausalLM'''),
('''big_bird''', '''FlaxBigBirdForCausalLM'''),
('''electra''', '''FlaxElectraForCausalLM'''),
('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
('''gpt2''', '''FlaxGPT2LMHeadModel'''),
('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
('''gptj''', '''FlaxGPTJForCausalLM'''),
('''opt''', '''FlaxOPTForCausalLM'''),
('''roberta''', '''FlaxRobertaForCausalLM'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
('''xglm''', '''FlaxXGLMForCausalLM'''),
('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
]
)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Sequence Classification mapping
('''albert''', '''FlaxAlbertForSequenceClassification'''),
('''bart''', '''FlaxBartForSequenceClassification'''),
('''bert''', '''FlaxBertForSequenceClassification'''),
('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
('''electra''', '''FlaxElectraForSequenceClassification'''),
('''mbart''', '''FlaxMBartForSequenceClassification'''),
('''roberta''', '''FlaxRobertaForSequenceClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
]
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
[
# Model for Question Answering mapping
('''albert''', '''FlaxAlbertForQuestionAnswering'''),
('''bart''', '''FlaxBartForQuestionAnswering'''),
('''bert''', '''FlaxBertForQuestionAnswering'''),
('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
('''electra''', '''FlaxElectraForQuestionAnswering'''),
('''mbart''', '''FlaxMBartForQuestionAnswering'''),
('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
]
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
# Model for Token Classification mapping
('''albert''', '''FlaxAlbertForTokenClassification'''),
('''bert''', '''FlaxBertForTokenClassification'''),
('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
('''electra''', '''FlaxElectraForTokenClassification'''),
('''roberta''', '''FlaxRobertaForTokenClassification'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
('''roformer''', '''FlaxRoFormerForTokenClassification'''),
('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
]
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES = OrderedDict(
[
# Model for Multiple Choice mapping
('''albert''', '''FlaxAlbertForMultipleChoice'''),
('''bert''', '''FlaxBertForMultipleChoice'''),
('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
('''electra''', '''FlaxElectraForMultipleChoice'''),
('''roberta''', '''FlaxRobertaForMultipleChoice'''),
('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
]
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES = OrderedDict(
[
('''bert''', '''FlaxBertForNextSentencePrediction'''),
]
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES = OrderedDict(
[
('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
]
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES = OrderedDict(
[
('''whisper''', '''FlaxWhisperForAudioClassification'''),
]
)
FLAX_MODEL_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
FLAX_MODEL_FOR_PRETRAINING_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
FLAX_MODEL_FOR_MASKED_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


class FlaxAutoModel(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_MAPPING


FlaxAutoModel = auto_class_update(FlaxAutoModel)


class FlaxAutoModelForPreTraining(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_PRETRAINING_MAPPING


FlaxAutoModelForPreTraining = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')


class FlaxAutoModelForCausalLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


FlaxAutoModelForCausalLM = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')


class FlaxAutoModelForMaskedLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MASKED_LM_MAPPING


FlaxAutoModelForMaskedLM = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')


class FlaxAutoModelForSeq2SeqLM(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


FlaxAutoModelForSeq2SeqLM = auto_class_update(
    FlaxAutoModelForSeq2SeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)


class FlaxAutoModelForSequenceClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


FlaxAutoModelForSequenceClassification = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)


class FlaxAutoModelForQuestionAnswering(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


FlaxAutoModelForQuestionAnswering = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')


class FlaxAutoModelForTokenClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


FlaxAutoModelForTokenClassification = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)


class FlaxAutoModelForMultipleChoice(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


FlaxAutoModelForMultipleChoice = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')


class FlaxAutoModelForNextSentencePrediction(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


FlaxAutoModelForNextSentencePrediction = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)


class FlaxAutoModelForImageClassification(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


FlaxAutoModelForImageClassification = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='''image classification'''
)


class FlaxAutoModelForVision2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


FlaxAutoModelForVision2Seq = auto_class_update(FlaxAutoModelForVision2Seq, head_doc='''vision-to-text modeling''')


class FlaxAutoModelForSpeechSeq2Seq(_BaseAutoModelClass):
    _model_mapping = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


FlaxAutoModelForSpeechSeq2Seq = auto_class_update(
    FlaxAutoModelForSpeechSeq2Seq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
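

if __name__ == "__main__":
    # Illustrative usage sketch, not part of the original file (it downloads a
    # checkpoint, so network access is required, and assumes the checkpoint
    # ships Flax weights): the auto classes above pick the concrete
    # architecture from the checkpoint's `config.model_type`.
    model = FlaxAutoModelForSequenceClassification.from_pretrained("distilbert-base-uncased")
    print(type(model).__name__)  # -> FlaxDistilBertForSequenceClassification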
| 340 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
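

# Minimal sketch of the same guard pattern, not part of the original file:
# resolve the optional dependency lazily and fail with a clear error on first
# use instead of at import time. `load_bartpho_tokenizer` is a hypothetical
# helper, shown only to illustrate the idea behind the _LazyModule indirection.
def load_bartpho_tokenizer():
    try:
        import sentencepiece  # noqa: F401
    except ImportError as err:
        raise ImportError("BartphoTokenizer requires the `sentencepiece` package") from err
    from .tokenization_bartpho import BartphoTokenizer

    return BartphoTokenizer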
| 325 | 0 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
def UpperCAmelCase_ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ : Dict = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
'<|endoftext|>',
]
snake_case_ : Union[str, Any] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
snake_case_ : List[Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
snake_case_ : List[str] = {'unk_token': '<unk>'}
snake_case_ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCAmelCase ) )
def UpperCAmelCase_ ( self : Optional[int] , **_A : Tuple ) -> List[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
        return GPT2Tokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def UpperCAmelCase_ ( self : List[Any] , **_A : Any ) -> List[str]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def UpperCAmelCase_ ( self : Any , _A : Optional[Any] ) -> int:
"""simple docstring"""
snake_case_ : Dict = 'lower newer'
snake_case_ : str = 'lower newer'
return input_text, output_text
def UpperCAmelCase_ ( self : int ) -> List[str]:
"""simple docstring"""
        snake_case_ : Tuple = GPT2Tokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case_ : List[Any] = 'lower newer'
snake_case_ : List[str] = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er']
snake_case_ : Any = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : Optional[Any] = tokens + [tokenizer.unk_token]
snake_case_ : Any = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
if not self.test_rust_tokenizer:
return
snake_case_ : int = self.get_tokenizer()
snake_case_ : str = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
snake_case_ : Dict = 'lower newer'
# Testing tokenization
snake_case_ : Any = tokenizer.tokenize(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
snake_case_ : int = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids without special tokens
snake_case_ : Any = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
snake_case_ : Optional[Any] = rust_tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing conversion to ids with special tokens
snake_case_ : Optional[int] = self.get_rust_tokenizer(add_prefix_space=_UpperCAmelCase )
snake_case_ : Optional[Any] = tokenizer.encode(_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
snake_case_ : str = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
# Testing the unknown token
snake_case_ : str = tokens + [rust_tokenizer.unk_token]
snake_case_ : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def UpperCAmelCase_ ( self : Dict , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : Optional[Any] , _A : Tuple=15 ) -> Any:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case_ : Any = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase )
# Simple input
snake_case_ : Any = 'This is a simple input'
snake_case_ : Optional[int] = ['This is a simple input 1', 'This is a simple input 2']
snake_case_ : Optional[Any] = ('This is a simple input', 'This is a pair')
snake_case_ : Dict = [
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Simple input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(_UpperCAmelCase , tokenizer_r.encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' )
# Pair input
self.assertRaises(
_UpperCAmelCase , tokenizer_r.batch_encode_plus , _UpperCAmelCase , max_length=_UpperCAmelCase , padding='max_length' , )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
        snake_case_ : List[str] = GPT2Tokenizer.from_pretrained(self.tmpdirname , pad_token='<pad>' )
# Simple input
snake_case_ : Optional[int] = 'This is a simple input'
snake_case_ : List[Any] = ['This is a simple input looooooooong', 'This is a simple input']
snake_case_ : List[Any] = ('This is a simple input', 'This is a pair')
snake_case_ : List[Any] = [
('This is a simple input loooooong', 'This is a simple input'),
('This is a simple pair loooooong', 'This is a simple pair'),
]
snake_case_ : Any = tokenizer.pad_token_id
snake_case_ : Optional[Any] = tokenizer(_UpperCAmelCase , padding='max_length' , max_length=30 , return_tensors='np' )
snake_case_ : Any = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors='np' )
snake_case_ : Optional[int] = tokenizer(*_UpperCAmelCase , padding='max_length' , max_length=60 , return_tensors='np' )
snake_case_ : int = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase , truncate=_UpperCAmelCase , return_tensors='np' )
# s
# test single string max_length padding
self.assertEqual(out_s['input_ids'].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['input_ids'] )
self.assertTrue(0 in out_s['attention_mask'] )
# s2
# test automatic padding
self.assertEqual(out_sa['input_ids'].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['input_ids'][0] )
self.assertFalse(0 in out_sa['attention_mask'][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['input_ids'][1] )
self.assertTrue(0 in out_sa['attention_mask'][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['input_ids'].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['input_ids'] )
self.assertTrue(0 in out_p['attention_mask'] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['input_ids'].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['input_ids'][0] )
self.assertFalse(0 in out_pa['attention_mask'][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['input_ids'][1] )
self.assertTrue(0 in out_pa['attention_mask'][1] )
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
"""simple docstring"""
snake_case_ : List[Any] = '$$$'
        snake_case_ : Dict = GPT2Tokenizer.from_pretrained(self.tmpdirname , bos_token=_UpperCAmelCase , add_bos_token=_UpperCAmelCase )
snake_case_ : List[str] = 'This is a simple input'
snake_case_ : Union[str, Any] = ['This is a simple input 1', 'This is a simple input 2']
snake_case_ : List[str] = tokenizer.bos_token_id
snake_case_ : Any = tokenizer(_UpperCAmelCase )
snake_case_ : int = tokenizer(_UpperCAmelCase )
self.assertEqual(out_s.input_ids[0] , _UpperCAmelCase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
snake_case_ : List[str] = tokenizer.decode(out_s.input_ids )
snake_case_ : Union[str, Any] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _UpperCAmelCase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCAmelCase_ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : Union[str, Any] = [self.get_tokenizer(do_lower_case=_UpperCAmelCase , add_bos_token=_UpperCAmelCase )]
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
snake_case_ : Optional[Any] = 'Encode this.'
snake_case_ : Dict = 'This one too please.'
snake_case_ : List[str] = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
encoded_sequence += tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
snake_case_ : Any = tokenizer.encode_plus(
_UpperCAmelCase , _UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_special_tokens_mask=_UpperCAmelCase , )
snake_case_ : Tuple = encoded_sequence_dict['input_ids']
snake_case_ : List[str] = encoded_sequence_dict['special_tokens_mask']
self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) )
snake_case_ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_UpperCAmelCase )
]
snake_case_ : Optional[Any] = [x for x in filtered_sequence if x is not None]
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
@require_tokenizers
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case_ : Any = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=_UpperCAmelCase )
snake_case_ : Tuple = 'A photo of a cat'
snake_case_ : int = tokenizer.encode(
_UpperCAmelCase , )
self.assertEqual(_UpperCAmelCase , [2, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('test_opt' )
snake_case_ : List[str] = AutoTokenizer.from_pretrained('./test_opt' )
snake_case_ : Optional[Any] = tokenizer.encode(
_UpperCAmelCase , )
self.assertEqual(_UpperCAmelCase , [2, 250, 1345, 9, 10, 4758] )
def UpperCAmelCase_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
snake_case_ : Optional[int] = AutoTokenizer.from_pretrained('facebook/opt-350m' , use_slow=_UpperCAmelCase )
snake_case_ : List[Any] = 'A photo of a cat'
snake_case_ : Optional[int] = tokenizer.encode(
_UpperCAmelCase , )
# Same as above
self.assertEqual(_UpperCAmelCase , [2, 250, 1345, 9, 10, 4758] )
@unittest.skip('This test is failing because of a bug in the fast tokenizer' )
def UpperCAmelCase_ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ : str = AutoTokenizer.from_pretrained('facebook/opt-350m' , from_slow=_UpperCAmelCase )
snake_case_ : Optional[Any] = 'bos'
snake_case_ : Optional[Any] = tokenizer.get_vocab()['bos']
snake_case_ : List[str] = 'A photo of a cat'
snake_case_ : Any = tokenizer.encode(
_UpperCAmelCase , )
# We changed the bos token
self.assertEqual(_UpperCAmelCase , [31957, 250, 1345, 9, 10, 4758] )
tokenizer.save_pretrained('./tok' )
snake_case_ : str = AutoTokenizer.from_pretrained('./tok' )
self.assertTrue(tokenizer.is_fast )
snake_case_ : Any = tokenizer.encode(
_UpperCAmelCase , )
self.assertEqual(_UpperCAmelCase , [31957, 250, 1345, 9, 10, 4758] )
| 327 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class TransfoXLConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=26_77_35,
        cutoffs=[2_00_00, 4_00_00, 20_00_00],
        d_model=10_24,
        d_embed=10_24,
        n_head=16,
        d_head=64,
        d_inner=40_96,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=16_00,
        clamp_len=10_00,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 325 | 0 |
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    """simple docstring"""
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)
    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1
        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """simple docstring"""
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
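
    # Illustrative extra check, not part of the original file: the trailing
    # failure value 2 means the longest border of "aabaabaaa" is "aa", which is
    # exactly how far kmp() falls back after a mismatch at the end.
    assert get_failure_array("aabaabaaa")[-1] == 2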
| 279 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
    Speech2Text2Config,
    Speech2Text2ForCausalLM,
    Speech2Text2Tokenizer,
    SpeechEncoderDecoderConfig,
    SpeechEncoderDecoderModel,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Model,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights_wav2vec2(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.feature_extractor

    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        elif name.split(".")[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")

    return proj_weight
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict(dict_path):
    with open(dict_path, "r", encoding="utf-8") as f:
        lines = f.readlines()
        words = [line.split(" ")[0] for line in lines]

    num_words = len(words)

    vocab_dict = {
        "<s>": 0,
        "<pad>": 1,
        "</s>": 2,
        "<unk>": 3,
    }

    vocab_dict.update(dict(zip(words, range(4, num_words + 4))))
    return vocab_dict
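# Example of the resulting mapping for a hypothetical two-line fairseq dict file
# containing "the 1234" and "of 987":
#   {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "the": 4, "of": 5}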
@torch.no_grad()
def convert_wav2vec2_checkpoint(
    checkpoint_path,
    pytorch_dump_folder_path,
    dict_path,
    encoder_config_path,
    decoder_config_path,
    vocab_size,
    num_decoder_layers,
):
    """
    Copy/paste/tweak the fairseq model's weights to the transformers design.
    """
    encoder_config = Wav2Vec2Config.from_pretrained(encoder_config_path)
    decoder_config = Speech2Text2Config.from_pretrained(
        decoder_config_path, vocab_size=vocab_size, decoder_layers=num_decoder_layers, do_stable_layer_norm=True
    )

    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=True,
    )

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    # set weights for wav2vec2 encoder
    hf_encoder = Wav2Vec2Model(encoder_config)
    projection_layer = recursively_load_weights_wav2vec2(model.encoder, hf_encoder)

    hf_decoder = Speech2Text2ForCausalLM(decoder_config)
    missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False)

    # set output linear layer
    unexpected_keys.remove("embed_out")
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach())

    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}")
    logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}")

    hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder)
    hf_wav2vec.config.tie_word_embeddings = False

    # add projection layer
    hf_wav2vec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight)
    hf_wav2vec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias)

    vocab_dict = create_vocab_dict(dict_path)

    with open(os.path.join(pytorch_dump_folder_path, "vocab.json"), "w") as fp:
        json.dump(vocab_dict, fp)

    tokenizer = Speech2Text2Tokenizer(os.path.join(pytorch_dump_folder_path, "vocab.json"))
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    config = hf_wav2vec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "speech_to_text_2"
    config["feature_extractor_type"] = "wav2vec2"

    hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
    feature_extractor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
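# Example invocation (the script file name and all paths are placeholders):
#   python convert_wav2vec2_seq2seq_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec2_seq2seq.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-2-speech2text2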
| 325 | 0 |
"""simple docstring"""
import math
def is_prime(number: int) -> bool:
    """Determines whether ``number`` is prime, in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10_001) -> int:
    """Returns the ``nth`` prime number (by default the 10001st)."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(F'''{solution() = }''')
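    # Sanity checks for the function above: the 1st prime is 2 and the 6th is 13.
    assert solution(1) == 2
    assert solution(6) == 13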
| 289 |
def binomial_coefficient(n, r):
    """Computes C(n, r) by building one row of Pascal's triangle in O(n * r) time."""
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute the current row from the previous row
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
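# Minimal sanity check for the function above: C(10, 5) = 252.
assert binomial_coefficient(n=10, r=5) == 252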
| 325 | 0 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, groups=1, activation="relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride,
            padding=kernel_size // 2, groups=groups, bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetEmbeddings(nn.Module):
    """RegNet embeddings (stem), composed of a single aggressive convolution."""

    def __init__(self, config):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """Projects residual features to the correct size and optionally downsamples the input with `stride=2`."""

    def __init__(self, in_channels, out_channels, stride=2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """Squeeze-and-Excitation layer (SE) proposed in https://arxiv.org/abs/1709.01507."""

    def __init__(self, in_channels, reduced_channels):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # (batch, channels, height, width) -> (batch, channels, 1, 1)
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
class RegNetXLayer(nn.Module):
    """RegNet's X layer: a ResNet-style bottleneck with a grouped 3x3 convolution and reduction = 1."""

    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetYLayer(nn.Module):
    """RegNet's Y layer: an X layer with a Squeeze-and-Excitation block."""

    def __init__(self, config, in_channels, out_channels, stride=1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
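# Design note: RegNetYLayer is RegNetXLayer plus a squeeze-and-excitation block
# (RegNetSELayer) inserted before the final 1x1 convolution, matching the "X" vs
# "Y" variants of the RegNet family; `config.layer_type` selects between them in
# RegNetStage below.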
class RegNetStage(nn.Module):
    """A RegNet stage, composed of stacked X or Y layers."""

    def __init__(self, config, in_channels, out_channels, stride=2, depth=2):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
class RegNetEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(self, hidden_state, output_hidden_states=False, return_dict=True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
class RegNetPreTrainedModel(PreTrainedModel):
    """Handles weights initialization and provides a simple interface for downloading pretrained models."""

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
REGNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
REGNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values, output_hidden_states=None, return_dict=None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
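# A minimal usage sketch (assumes this module ships as part of transformers and
# that `image` is a PIL image; the checkpoint name comes from the docstring
# constants above):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits
#   print(model.config.id2label[logits.argmax(-1).item()])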
| 313 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = ["vqvae"]
def __init__( self : int , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Mel , _UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , mel=_UpperCAmelCase , vqvae=_UpperCAmelCase )
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
return 50 if isinstance(self.scheduler , _UpperCAmelCase ) else 10_00
@torch.no_grad()
def __call__( self : str , _UpperCAmelCase : int = 1 , _UpperCAmelCase : str = None , _UpperCAmelCase : np.ndarray = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = None , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : str=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
__lowercase = steps or self.get_default_steps()
self.scheduler.set_timesteps(_UpperCAmelCase )
__lowercase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__lowercase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__lowercase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_UpperCAmelCase , device=self.device , )
__lowercase = noise
__lowercase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = self.mel.audio_slice_to_image(_UpperCAmelCase )
__lowercase = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
__lowercase = (input_image / 2_55) * 2 - 1
__lowercase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__lowercase = self.vqvae.encode(torch.unsqueeze(_UpperCAmelCase , 0 ) ).latent_dist.sample(
generator=_UpperCAmelCase )[0]
__lowercase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , self.scheduler.timesteps[start_step - 1] )
__lowercase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__lowercase = int(mask_start_secs * pixels_per_second )
__lowercase = int(mask_end_secs * pixels_per_second )
__lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _UpperCAmelCase ):
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )['sample']
else:
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']
if isinstance(self.scheduler , _UpperCAmelCase ):
__lowercase = self.scheduler.step(
model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']
else:
__lowercase = self.scheduler.step(
model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']
if mask is not None:
if mask_start > 0:
__lowercase = mask[:, step, :, :mask_start]
if mask_end > 0:
__lowercase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__lowercase = 1 / self.vqvae.config.scaling_factor * images
__lowercase = self.vqvae.decode(_UpperCAmelCase )['sample']
__lowercase = (images / 2 + 0.5).clamp(0 , 1 )
__lowercase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__lowercase = (images * 2_55).round().astype('uint8' )
__lowercase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_UpperCAmelCase , mode='RGB' ).convert('L' ) for _ in images) )
__lowercase = [self.mel.image_to_audio(_UpperCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_UpperCAmelCase ) )
@torch.no_grad()
def a__ ( self : Any , _UpperCAmelCase : List[Image.Image] , _UpperCAmelCase : int = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , _UpperCAmelCase )
self.scheduler.set_timesteps(_UpperCAmelCase )
__lowercase = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
__lowercase = (sample / 2_55) * 2 - 1
__lowercase = torch.Tensor(_UpperCAmelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
__lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__lowercase = self.scheduler.alphas_cumprod[t]
__lowercase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__lowercase = 1 - alpha_prod_t
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']
__lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def a__ ( _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : torch.Tensor , _UpperCAmelCase : float ) -> torch.Tensor:
"""simple docstring"""
__lowercase = acos(torch.dot(torch.flatten(_UpperCAmelCase ) , torch.flatten(_UpperCAmelCase ) ) / torch.norm(_UpperCAmelCase ) / torch.norm(_UpperCAmelCase ) )
return sin((1 - alpha) * theta ) * xa / sin(_UpperCAmelCase ) + sin(alpha * theta ) * xa / sin(_UpperCAmelCase )
| 325 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_1, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
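# Note on the checks above: Transfo-XL carries a recurrence memory ("mems", one
# tensor of shape (mem_len, batch_size, hidden_size) per layer). Each check runs a
# first forward pass, feeds the returned mems into a second pass, and asserts that
# the output and memory shapes stay consistent across passes.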
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False

    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_keras_fit(self):
        pass


@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
UpperCamelCase :Union[str, Any] = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
UpperCamelCase :str = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
UpperCamelCase :Any = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
UpperCamelCase :Any = model.generate(_UpperCAmelCase , max_length=200 , do_sample=_UpperCAmelCase )
self.assertListEqual(output_ids[0].numpy().tolist() , _UpperCAmelCase )
| 259 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear scan over array[left:right]; used once the range drops below ``precision``."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted array; returns an index of ``target`` or -1."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over array[left..right]; returns an index of ``target`` or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)

        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
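    # Quick self-check on a fixed sorted array (independent of the interactive
    # input below): 42 sits at index 14 of [0, 3, 6, ..., 99].
    sample = list(range(0, 100, 3))
    assert ite_ternary_search(sample, 42) == 14
    assert rec_ternary_search(0, len(sample) - 1, sample, 42) == 14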
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
else:
print("""Not found""")
| 325 | 0 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
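# Example CLI usage once this command is registered with the transformers-cli
# entry point (the model name is illustrative):
#   transformers-cli download bert-base-uncased --cache-dir ./cache --force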
| 332 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A__ ( nn.Module ):
def __init__( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__lowercase = module
__lowercase = nn.Sequential(
nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , )
__lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a__ ( self : str , _UpperCAmelCase : List[str] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCAmelCase__ : int = "bigscience/bloom-1b7"
# Constant values
lowerCAmelCase__ : Any = 2.109659552692574
lowerCAmelCase__ : str = "Hello my name is"
lowerCAmelCase__ : Any = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCAmelCase__ : List[Any] = 10
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Models and tokenizer
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
__lowercase = config.to_dict()
__lowercase = config.to_diff_dict()
__lowercase = config.to_json_string()
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
__lowercase = self.model_fpaa.get_memory_footprint()
__lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uint8)
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
__lowercase = True
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_fpaa.to(torch.floataa )
__lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowercase = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__lowercase = self.model_fpaa.half()
# Check this does not throw an error
__lowercase = self.model_fpaa.float()
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
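# For reference, a minimal sketch of the 4-bit loading pattern these tests exercise,
# using the public `load_in_4bit` flag (model name and prompt mirror the constants
# above; requires bitsandbytes and a CUDA GPU):
#
#   model = AutoModelForCausalLM.from_pretrained(
#       "bigscience/bloom-1b7", load_in_4bit=True, device_map="auto"
#   )
#   inputs = tokenizer("Hello my name is", return_tensors="pt").to(0)
#   output = model.generate(**inputs, max_new_tokens=10)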
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
@classmethod
def a__ ( cls : int ) -> Tuple:
"""simple docstring"""
__lowercase = 't5-small'
__lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase = AutoTokenizer.from_pretrained(cls.model_name )
__lowercase = 'Translate in German: Hello, my dog is cute'
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : int ) -> int:
"""simple docstring"""
from transformers import TaForConditionalGeneration
__lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase = None
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
__lowercase = modules
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().setUp()
# model_name
__lowercase = 'bigscience/bloom-560m'
__lowercase = 't5-small'
# Different types of model
__lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Sequence classification model
__lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# CausalLM model
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Seq2seq model
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'facebook/opt-350m'
super().setUp()
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
        __lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
        self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
        # Step 1: freeze all parameters
        for param in model.parameters():
__lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
__lowercase = LoRALayer(module.q_proj , rank=16 )
__lowercase = LoRALayer(module.k_proj , rank=16 )
__lowercase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = "gpt2-xl"
lowerCAmelCase__ : str = 3.3191854854152187
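# --- Illustrative sketch (not part of the original test suite) ---
# A minimal example, assuming `bitsandbytes` and a CUDA GPU are available, of
# the 4-bit loading path the tests above exercise; the checkpoint name is
# only illustrative.
def _demo_load_model_in_4bit():
    import torch
    from transformers import AutoModelForCausalLM, BitsAndBytesConfig

    quantization_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.float16,
    )
    # `device_map="auto"` lets accelerate place the quantized weights for us.
    return AutoModelForCausalLM.from_pretrained(
        "bigscience/bloom-560m",
        quantization_config=quantization_config,
        device_map="auto",
    )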
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( lowerCAmelCase__ ,unittest.TestCase ):
__lowerCamelCase : List[Any] = KandinskyVaaPriorPipeline
__lowerCamelCase : Optional[int] = ["prompt"]
__lowerCamelCase : Union[str, Any] = ["prompt", "negative_prompt"]
__lowerCamelCase : Tuple = [
"num_images_per_prompt",
"generator",
"num_inference_steps",
"latents",
"negative_prompt",
"guidance_scale",
"output_type",
"return_dict",
]
__lowerCamelCase : int = False
@property
def _snake_case ( self ) -> int:
return 32
@property
def _snake_case ( self ) -> Tuple:
return 32
@property
def _snake_case ( self ) -> Dict:
return self.time_input_dim
@property
def _snake_case ( self ) -> List[Any]:
return self.time_input_dim * 4
@property
def _snake_case ( self ) -> Dict:
return 100
@property
def _snake_case ( self ) -> Any:
_lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
return tokenizer
@property
def _snake_case ( self ) -> List[Any]:
torch.manual_seed(0 )
_lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(_UpperCAmelCase )
@property
def _snake_case ( self ) -> Optional[Any]:
torch.manual_seed(0 )
_lowerCAmelCase = {
"num_attention_heads": 2,
"attention_head_dim": 12,
"embedding_dim": self.text_embedder_hidden_size,
"num_layers": 1,
}
_lowerCAmelCase = PriorTransformer(**_UpperCAmelCase )
        # clip_std and clip_mean are initialized to 0, so PriorTransformer.post_process_latents would always return 0 - set clip_std to 1 so it won't return 0
_lowerCAmelCase = nn.Parameter(torch.ones(model.clip_std.shape ) )
return model
@property
def _snake_case ( self ) -> str:
torch.manual_seed(0 )
_lowerCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=224 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=14 , )
_lowerCAmelCase = CLIPVisionModelWithProjection(_UpperCAmelCase )
return model
@property
def _snake_case ( self ) -> Dict:
_lowerCAmelCase = CLIPImageProcessor(
crop_size=224 , do_center_crop=_UpperCAmelCase , do_normalize=_UpperCAmelCase , do_resize=_UpperCAmelCase , image_mean=[0.48145466, 0.4578275, 0.40821073] , image_std=[0.26862954, 0.26130258, 0.27577711] , resample=3 , size=224 , )
return image_processor
def _snake_case ( self ) -> Optional[int]:
_lowerCAmelCase = self.dummy_prior
_lowerCAmelCase = self.dummy_image_encoder
_lowerCAmelCase = self.dummy_text_encoder
_lowerCAmelCase = self.dummy_tokenizer
_lowerCAmelCase = self.dummy_image_processor
_lowerCAmelCase = UnCLIPScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=_UpperCAmelCase , clip_sample_range=10.0 , )
_lowerCAmelCase = {
"prior": prior,
"image_encoder": image_encoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"scheduler": scheduler,
"image_processor": image_processor,
}
return components
    def _snake_case ( self , device , seed=0 ) -> str:
        if str(device ).startswith("mps" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
_lowerCAmelCase = {
"prompt": "horse",
"generator": generator,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def _snake_case ( self ) -> List[Any]:
_lowerCAmelCase = "cpu"
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_UpperCAmelCase )
_lowerCAmelCase = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
_lowerCAmelCase = pipe(**self.get_dummy_inputs(_UpperCAmelCase ) )
_lowerCAmelCase = output.image_embeds
_lowerCAmelCase = pipe(
**self.get_dummy_inputs(_UpperCAmelCase ) , return_dict=_UpperCAmelCase , )[0]
_lowerCAmelCase = image[0, -10:]
_lowerCAmelCase = image_from_tuple[0, -10:]
assert image.shape == (1, 32)
_lowerCAmelCase = np.array(
[-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = torch_device == "cpu"
_lowerCAmelCase = True
_lowerCAmelCase = False
self._test_inference_batch_single_identical(
test_max_difference=_UpperCAmelCase , relax_max_difference=_UpperCAmelCase , test_mean_pixel_difference=_UpperCAmelCase , )
@skip_mps
def _snake_case ( self ) -> Union[str, Any]:
_lowerCAmelCase = torch_device == "cpu"
_lowerCAmelCase = False
self._test_attention_slicing_forward_pass(
test_max_difference=_UpperCAmelCase , test_mean_pixel_difference=_UpperCAmelCase , )
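# --- Illustrative sketch (not part of the original test suite) ---
# How the prior pipeline under test is typically driven outside unit tests,
# assuming the public `kandinsky-community/kandinsky-2-2-prior` checkpoint and
# a CUDA device.
def _demo_kandinsky_prior():
    import torch
    from diffusers import KandinskyV22PriorPipeline

    pipe = KandinskyV22PriorPipeline.from_pretrained(
        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
    ).to("cuda")
    # The prior maps a text prompt to CLIP image embeddings that the decoder consumes.
    out = pipe("horse", guidance_scale=4.0, num_inference_steps=25)
    return out.image_embeds, out.negative_image_embeds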
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
    def __init__( self : Any , parent : Optional[Any] ) -> Union[str, Any]:
        """simple docstring"""
        self.parent = parent
__lowercase = 13
__lowercase = 7
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 99
        __lowercase = 384
__lowercase = 2
__lowercase = 4
__lowercase = 37
__lowercase = 'gelu'
__lowercase = 0.1
__lowercase = 0.1
        __lowercase = 512
__lowercase = 16
__lowercase = 2
__lowercase = 0.02
__lowercase = 3
__lowercase = 4
        __lowercase = 128
__lowercase = 2
__lowercase = 9
__lowercase = 1
__lowercase = None
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = TFConvBertModel(config=_UpperCAmelCase )
__lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase = [input_ids, input_mask]
__lowercase = model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = TFConvBertForMaskedLM(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFConvBertForTokenClassification(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.prepare_config_and_inputs()
        (
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
            __lowercase,
        ) = config_and_inputs
__lowercase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ : List[str] = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : int = False
lowerCAmelCase__ : List[str] = False
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = TFConvBertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = True
if hasattr(_UpperCAmelCase , 'use_cache' ):
__lowercase = True
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
for model_class in self.all_model_classes:
__lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = model_class(_UpperCAmelCase )
__lowercase = len(model(_UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
__lowercase = os.path.join(_UpperCAmelCase , 'saved_model' , '1' )
__lowercase = tf.keras.models.load_model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = outputs['encoder_hidden_states']
__lowercase = outputs['encoder_attentions']
else:
__lowercase = outputs['hidden_states']
__lowercase = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
__lowercase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
def check_decoder_attentions_output(_UpperCAmelCase : int ):
__lowercase = len(_UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
__lowercase = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ):
__lowercase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__lowercase = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase = model(_UpperCAmelCase )[0]
        __lowercase = [1, 6, 768]
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = tf.constant(
[
[
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 )
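# --- Illustrative sketch (not part of the original test suite) ---
# A plain-text variant of the integration check above; the tokenizer call is
# an assumption on top of the raw token ids used in the test.
def _demo_convbert_inference():
    from transformers import AutoTokenizer, TFConvBertModel

    tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
    inputs = tokenizer("Hello world", return_tensors="tf")
    # last_hidden_state has shape (batch_size, sequence_length, 768).
    return model(**inputs).last_hidden_state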
"""simple docstring"""
INSTALL_CONTENT = '\n# How to install Transformers\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class A__ :
    def __init__( self : Tuple , scheduler : Optional[int] , optimizers : List[str] , step_with_optimizer : bool = True , split_batches : bool = False ) -> Union[str, Any]:
        """simple docstring"""
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers , (list, tuple) ) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()
def a__ ( self : Optional[int] , *_UpperCAmelCase : int , **_UpperCAmelCase : str ) -> Union[str, Any]:
"""simple docstring"""
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
__lowercase = AcceleratorState().num_processes
for _ in range(_UpperCAmelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , 'total_steps' ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
else:
self.scheduler.step(*_UpperCAmelCase , **_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return self.scheduler.get_last_lr()
def a__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
return self.scheduler.state_dict()
def a__ ( self : Optional[int] , _UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
self.scheduler.load_state_dict(_UpperCAmelCase )
def a__ ( self : Dict ) -> int:
"""simple docstring"""
return self.scheduler.get_lr()
def a__ ( self : Union[str, Any] , *_UpperCAmelCase : Union[str, Any] , **_UpperCAmelCase : List[str] ) -> Any:
"""simple docstring"""
return self.scheduler.print_lr(*_UpperCAmelCase , **_UpperCAmelCase )
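# --- Illustrative usage sketch ---
# Users rarely instantiate the wrapper above directly; `Accelerator.prepare`
# returns one automatically. The tiny model and schedule below are placeholders.
def _demo_prepared_scheduler():
    import torch
    from accelerate import Accelerator

    accelerator = Accelerator()
    model = torch.nn.Linear(8, 8)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
    # `prepare` wraps the scheduler so it only steps when the optimizer did.
    model, optimizer, scheduler = accelerator.prepare(model, optimizer, scheduler)
    return scheduler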
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        F''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights_wavaveca( fairseq_model , hf_model ):
lowercase = []
lowercase = fairseq_model.state_dict()
lowercase = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
lowercase = None
for name, value in fairseq_dict.items():
lowercase = False
if "conv_layers" in name:
load_conv_layer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , )
lowercase = True
elif name.split('.' )[0] == "proj":
lowercase = fairseq_model.proj
lowercase = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowercase = True
if "*" in mapped_key:
lowercase = name.split(__SCREAMING_SNAKE_CASE )[0].split('.' )[-2]
lowercase = mapped_key.replace('*' , __SCREAMING_SNAKE_CASE )
if "weight_g" in name:
lowercase = 'weight_g'
elif "weight_v" in name:
lowercase = 'weight_v'
elif "bias" in name:
lowercase = 'bias'
elif "weight" in name:
lowercase = 'weight'
else:
lowercase = None
set_recursively(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(__SCREAMING_SNAKE_CASE )
logger.warning(F'''Unused weights: {unused_weights}''' )
return proj_weight
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
lowercase = full_name.split('conv_layers.' )[-1]
lowercase = name.split('.' )
lowercase = int(items[0] )
lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__SCREAMING_SNAKE_CASE )
def make_linear_from_emb( emb ):
lowercase , lowercase = emb.weight.shape
lowercase = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE )
lowercase = emb.weight.data
return lin_layer
def create_vocab_dict( dict_path ):
    with open(dict_path , 'r' , encoding='utf-8' ) as f:
lowercase = f.readlines()
lowercase = [line.split(' ' )[0] for line in lines]
lowercase = len(__SCREAMING_SNAKE_CASE )
lowercase = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(__SCREAMING_SNAKE_CASE , range(4 , num_words + 4 ) ) ) )
return vocab_dict
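# --- Illustrative sketch ---
# The dict-reading helper above builds a vocab in which the four fairseq
# specials take ids 0-3 and corpus words follow from id 4 on; this standalone
# mirror (with made-up dict entries) shows the resulting mapping.
def _demo_vocab_dict_layout():
    lines = ["hello 120", "world 87"]  # made-up fairseq dict entries
    words = [line.split(" ")[0] for line in lines]
    vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
    vocab.update(dict(zip(words, range(4, len(words) + 4))))
    assert vocab == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "hello": 4, "world": 5}
    return vocab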
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
lowercase = WavaVecaConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
lowercase = SpeechaTextaConfig.from_pretrained(
__SCREAMING_SNAKE_CASE , vocab_size=__SCREAMING_SNAKE_CASE , decoder_layers=__SCREAMING_SNAKE_CASE , do_stable_layer_norm=__SCREAMING_SNAKE_CASE )
lowercase = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__SCREAMING_SNAKE_CASE , return_attention_mask=__SCREAMING_SNAKE_CASE , )
lowercase , lowercase , lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
lowercase = model[0].eval()
# set weights for wav2vec2 encoder
lowercase = WavaVecaModel(__SCREAMING_SNAKE_CASE )
lowercase = recursively_load_weights_wavaveca(model.encoder , __SCREAMING_SNAKE_CASE )
lowercase = SpeechaTextaForCausalLM(__SCREAMING_SNAKE_CASE )
lowercase , lowercase = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__SCREAMING_SNAKE_CASE )
# set output linear layer
unexpected_keys.remove('embed_out' )
lowercase = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' )
logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' )
lowercase = SpeechEncoderDecoderModel(encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE )
lowercase = False
# add projection layer
lowercase = nn.Parameter(projection_layer.weight )
lowercase = nn.Parameter(projection_layer.bias )
lowercase = create_vocab_dict(__SCREAMING_SNAKE_CASE )
with open(os.path.join(__SCREAMING_SNAKE_CASE , 'vocab.json' ) , 'w' ) as fp:
json.dump(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase = SpeechaTextaTokenizer(os.path.join(__SCREAMING_SNAKE_CASE , 'vocab.json' ) )
tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE )
lowercase = hf_wavavec.config.to_dict()
lowercase = tokenizer.pad_token_id
lowercase = tokenizer.bos_token_id
lowercase = tokenizer.eos_token_id
lowercase = 'speech_to_text_2'
lowercase = 'wav2vec2'
lowercase = SpeechEncoderDecoderConfig.from_dict(__SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(__SCREAMING_SNAKE_CASE )
feature_extractor.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
    parser.add_argument('''--vocab_size''', default=10224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
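# An illustrative command line for this script (the file name and every path
# below are placeholders, not values from the original repository):
#   python convert_checkpoint.py \
#       --checkpoint_path /path/to/checkpoint_best.pt \
#       --pytorch_dump_folder_path ./converted-model \
#       --dict_path /path/to/dict.ltr.txt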
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE__ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")
def find_backend( SCREAMING_SNAKE_CASE : List[Any] ) -> Dict:
if _re_test_backend.search(SCREAMING_SNAKE_CASE ) is None:
return None
__lowercase = [b[0] for b in _re_backend.findall(SCREAMING_SNAKE_CASE )]
backends.sort()
return "_and_".join(SCREAMING_SNAKE_CASE )
def parse_init( SCREAMING_SNAKE_CASE : List[Any] ) -> Tuple:
with open(SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowercase = f.readlines()
__lowercase = 0
while line_index < len(SCREAMING_SNAKE_CASE ) and not lines[line_index].startswith('_import_structure = {' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(SCREAMING_SNAKE_CASE ):
return None
# First grab the objects without a specific backend in _import_structure
__lowercase = []
while not lines[line_index].startswith('if TYPE_CHECKING' ) and find_backend(lines[line_index] ) is None:
__lowercase = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ):
__lowercase = _re_one_line_import_struct.search(SCREAMING_SNAKE_CASE ).groups()[0]
            __lowercase = re.findall(r'\[([^\]]+)\]' , SCREAMING_SNAKE_CASE )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(', ' )] )
line_index += 1
continue
__lowercase = _re_import_struct_key_value.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
__lowercase = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(', ' ) if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('if TYPE_CHECKING' ):
# If the line is an if not is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 4 ):
__lowercase = lines[line_index]
if _re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_import_struct_add_one.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_import_struct_add_many.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_between_brackets.search(SCREAMING_SNAKE_CASE ) is not None:
__lowercase = _re_between_brackets.search(SCREAMING_SNAKE_CASE ).groups()[0].split(', ' )
__lowercase = [obj[1:-1] for obj in imports if len(SCREAMING_SNAKE_CASE ) > 0]
objects.extend(SCREAMING_SNAKE_CASE )
elif _re_quote_object.search(SCREAMING_SNAKE_CASE ) is not None:
objects.append(_re_quote_object.search(SCREAMING_SNAKE_CASE ).groups()[0] )
elif line.startswith(' ' * 8 + '"' ):
objects.append(line[9:-3] )
elif line.startswith(' ' * 12 + '"' ):
objects.append(line[13:-3] )
line_index += 1
__lowercase = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
__lowercase = []
while (
line_index < len(SCREAMING_SNAKE_CASE )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('else' )
):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
__lowercase = {'none': objects}
# Let's continue with backend-specific objects
while line_index < len(SCREAMING_SNAKE_CASE ):
# If the line is an if is_backend_available, we grab all objects associated.
__lowercase = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
__lowercase = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
__lowercase = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(' ' * 8 ):
__lowercase = lines[line_index]
__lowercase = _re_import.search(SCREAMING_SNAKE_CASE )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 12 ):
objects.append(line[12:-2] )
line_index += 1
__lowercase = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def analyze_results( import_dict_objects , type_hint_objects ) -> int:
def find_duplicates(SCREAMING_SNAKE_CASE : Tuple ):
return [k for k, v in collections.Counter(SCREAMING_SNAKE_CASE ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
__lowercase = []
for key in import_dict_objects.keys():
__lowercase = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(F"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
__lowercase = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(F"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
__lowercase = 'base imports' if key == 'none' else F"""{key} backend"""
errors.append(F"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(F""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(F""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def check_all_inits( ) -> Tuple:
__lowercase = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS ):
if "__init__.py" in files:
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , '__init__.py' )
__lowercase = parse_init(SCREAMING_SNAKE_CASE )
if objects is not None:
__lowercase = analyze_results(*SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = F"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('\n'.join(SCREAMING_SNAKE_CASE ) )
if len(SCREAMING_SNAKE_CASE ) > 0:
raise ValueError('\n\n'.join(SCREAMING_SNAKE_CASE ) )
def get_transformers_submodules( ) -> Dict:
__lowercase = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS ):
for folder in directories:
# Ignore private modules
if folder.startswith('_' ):
directories.remove(SCREAMING_SNAKE_CASE )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(SCREAMING_SNAKE_CASE ) / folder).glob('*.py' ) ) ) == 0:
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / folder).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace(os.path.sep , '.' )
submodules.append(SCREAMING_SNAKE_CASE )
for fname in files:
if fname == "__init__.py":
continue
__lowercase = str((Path(SCREAMING_SNAKE_CASE ) / fname).relative_to(SCREAMING_SNAKE_CASE ) )
__lowercase = short_path.replace('.py' , '' ).replace(os.path.sep , '.' )
if len(submodule.split('.' ) ) == 1:
submodules.append(SCREAMING_SNAKE_CASE )
return submodules
IGNORE_SUBMODULES = [
"""convert_pytorch_checkpoint_to_tf2""",
"""modeling_flax_pytorch_utils""",
]
def check_submodules( ) -> List[str]:
# This is to make sure the transformers module imported is the one in the repo.
__lowercase = importlib.util.spec_from_file_location(
        'transformers' , os.path.join(PATH_TO_TRANSFORMERS , '__init__.py' ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , )
__lowercase = spec.loader.load_module()
__lowercase = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
]
if len(SCREAMING_SNAKE_CASE ) > 0:
__lowercase = '\n'.join(F"""- {module}""" for module in module_not_registered )
raise ValueError(
'The following submodules are not properly registered in the main init of Transformers:\n'
F"""{list_of_modules}\n"""
'Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
    FNetTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''google/fnet-base''': 512,
'''google/fnet-large''': 512,
}
SPIECE_UNDERLINE = '''▁'''
class SCREAMING_SNAKE_CASE__ ( lowerCAmelCase__ ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=True , unk_token="<unk>" , sep_token="[SEP]" , pad_token="<pad>" , cls_token="[CLS]" , mask_token="[MASK]" , **kwargs , ):
        '''simple docstring'''
        mask_token = (
            AddedToken(mask_token , lstrip=True , rstrip=False , normalized=False )
            if isinstance(mask_token , str )
            else mask_token
        )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , **kwargs , )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
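# --- Illustrative sketch ---
# What the two special-token helpers above compute for a sequence pair; the
# ids below are placeholders, not real FNet vocabulary ids.
def _demo_fnet_pair_layout():
    cls_id, sep_id = 2, 3  # hypothetical special-token ids
    ids_a, ids_b = [10, 11], [12]
    pair = [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]
    # Segment 0 covers [CLS] + sentence A + [SEP]; segment 1 covers B + [SEP].
    type_ids = [0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1)
    assert len(pair) == len(type_ids) == 6
    return pair, type_ids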
import logging
import os
from .state import PartialState
class MultiProcessAdapter( logging.LoggerAdapter ):
@staticmethod
    def _should_log( main_process_only : str ) -> Optional[Any]:
        """simple docstring"""
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log( self , level , msg , *args : Tuple , **kwargs : List[str] ) -> Optional[int]:
        """simple docstring"""
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        main_process_only = kwargs.pop('main_process_only' , True )
        in_order = kwargs.pop('in_order' , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def get_logger( name : str , log_level : str = None ) -> Optional[Any]:
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
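# --- Illustrative usage sketch ---
# `get_logger` assumes an `Accelerator` (or `PartialState`) has already been
# created; otherwise the adapter raises when a record is emitted.
def _demo_multiprocess_logging():
    from accelerate import Accelerator

    Accelerator()  # initializes the shared state the adapter checks
    logger = get_logger(__name__, log_level="INFO")
    logger.info("printed once, on the main process only")
    logger.info("printed by every rank, in order", main_process_only=False, in_order=True)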
from manim import *
class lowercase__ ( lowerCAmelCase__ ):
def UpperCAmelCase ( self )-> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = Rectangle(height=0.5 , width=0.5 )
lowerCAmelCase__ = Rectangle(height=0.25 , width=0.25 )
lowerCAmelCase__ = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
lowerCAmelCase__ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
lowerCAmelCase__ = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
lowerCAmelCase__ = Text("CPU" , font_size=24 )
lowerCAmelCase__ = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_UpperCAmelCase )
lowerCAmelCase__ = [mem.copy() for i in range(4 )]
lowerCAmelCase__ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
lowerCAmelCase__ = Text("GPU" , font_size=24 )
lowerCAmelCase__ = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
gpu.move_to([-1, -1, 0] )
self.add(_UpperCAmelCase )
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
lowerCAmelCase__ = Text("Model" , font_size=24 )
lowerCAmelCase__ = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
model.move_to([3, -1.0, 0] )
self.add(_UpperCAmelCase )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i, rect in enumerate(_UpperCAmelCase ):
rect.set_stroke(_UpperCAmelCase )
lowerCAmelCase__ = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_UpperCAmelCase , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_UpperCAmelCase )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=_UpperCAmelCase , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=_UpperCAmelCase , buff=0.0 )
self.add(_UpperCAmelCase )
model_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase , *_UpperCAmelCase )
lowerCAmelCase__ = [mem.copy() for i in range(6 )]
lowerCAmelCase__ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
lowerCAmelCase__ = Text("Loaded Checkpoint" , font_size=24 )
lowerCAmelCase__ = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
checkpoint.move_to([3, 0.5, 0] )
self.add(_UpperCAmelCase )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i, rect in enumerate(_UpperCAmelCase ):
lowerCAmelCase__ = fill.copy().set_fill(_UpperCAmelCase , opacity=0.7 )
target.move_to(_UpperCAmelCase )
ckpt_arr.append(_UpperCAmelCase )
lowerCAmelCase__ = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(_UpperCAmelCase )
self.add(*_UpperCAmelCase , *_UpperCAmelCase )
lowerCAmelCase__ = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
lowerCAmelCase__ = MarkupText(
F"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model" , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_UpperCAmelCase , _UpperCAmelCase )
lowerCAmelCase__ = MarkupText(
F"<span fgcolor='{BLUE}'>●</span> Checkpoint" , font_size=18 , )
blue_text.next_to(_UpperCAmelCase , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(_UpperCAmelCase )
lowerCAmelCase__ = MarkupText(
F"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device." , font_size=24 , )
step_a.move_to([2, 2, 0] )
lowerCAmelCase__ = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ = [meta_mem.copy() for i in range(6 )]
lowerCAmelCase__ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
lowerCAmelCase__ = VGroup(*_UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
lowerCAmelCase__ = VGroup(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0 )
lowerCAmelCase__ = Text("Disk" , font_size=24 )
lowerCAmelCase__ = Group(_UpperCAmelCase , _UpperCAmelCase ).arrange(_UpperCAmelCase , buff=0.5 , aligned_edge=_UpperCAmelCase )
disk.move_to([-4.0, -1.25, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) , Write(_UpperCAmelCase , run_time=1 ) , Create(_UpperCAmelCase , run_time=1 ) )
lowerCAmelCase__ = []
for i, rect in enumerate(_UpperCAmelCase ):
lowerCAmelCase__ = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(_UpperCAmelCase , run_time=1.5 ) )
self.play(*_UpperCAmelCase )
self.play(FadeOut(_UpperCAmelCase ) )
lowerCAmelCase__ = MarkupText(F"Then, the checkpoint is removed from memory\nthrough garbage collection." , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(_UpperCAmelCase , run_time=3 ) )
self.play(
FadeOut(_UpperCAmelCase , _UpperCAmelCase , *_UpperCAmelCase , *_UpperCAmelCase ) , )
self.wait()
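# Rendering sketch (hedged: hypothetical file name, manim community CLI; the scene
# class is named `lowercase__` in this dump):
#   manim -pql checkpoint_offload_scene.py lowercase__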
| 340 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Optional[int] ) -> Union[str, Any]:
__lowercase = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__lowercase = True if 'large' in model_name or 'huge' in model_name else False
__lowercase = True if 'large' in model_name or 'huge' in model_name else False
__lowercase = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowercase = [3, 3, 3, 3]
__lowercase = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowercase = [4, 4, 4, 4]
__lowercase = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowercase = [3, 3, 3, 3]
if "lrf" in model_name:
__lowercase = [3, 3, 3, 3]
else:
__lowercase = [2, 2, 2, 2]
if "tiny" in model_name:
__lowercase = 96
elif "small" in model_name:
__lowercase = 96
elif "base" in model_name:
__lowercase = 128
elif "large" in model_name:
__lowercase = 192
elif "xlarge" in model_name:
__lowercase = 256
elif "huge" in model_name:
__lowercase = 352
# set label information
__lowercase = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__lowercase = 'imagenet-22k-id2label.json'
else:
__lowercase = 'imagenet-1k-id2label.json'
__lowercase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
__lowercase = {int(SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
__lowercase = {v: k for k, v in idalabel.items()}
__lowercase = FocalNetConfig(
embed_dim=SCREAMING_SNAKE_CASE , depths=SCREAMING_SNAKE_CASE , focal_levels=SCREAMING_SNAKE_CASE , focal_windows=SCREAMING_SNAKE_CASE , use_conv_embed=SCREAMING_SNAKE_CASE , idalabel=SCREAMING_SNAKE_CASE , labelaid=SCREAMING_SNAKE_CASE , use_post_layernorm=SCREAMING_SNAKE_CASE , use_layerscale=SCREAMING_SNAKE_CASE , )
return config
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> Dict:
if "patch_embed.proj" in name:
__lowercase = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__lowercase = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
__lowercase = 'encoder.' + name
if "encoder.layers" in name:
__lowercase = name.replace('encoder.layers' , 'encoder.stages' )
if "downsample.proj" in name:
__lowercase = name.replace('downsample.proj' , 'downsample.projection' )
if "blocks" in name:
__lowercase = name.replace('blocks' , 'layers' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
__lowercase = name.replace('modulation.f' , 'modulation.projection_in' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
__lowercase = name.replace('modulation.h' , 'modulation.projection_context' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
__lowercase = name.replace('modulation.proj' , 'modulation.projection_out' )
if name == "norm.weight":
__lowercase = 'layernorm.weight'
if name == "norm.bias":
__lowercase = 'layernorm.bias'
if "head" in name:
__lowercase = name.replace('head' , 'classifier' )
else:
__lowercase = 'focalnet.' + name
return name
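# Illustrative key mapping (hypothetical checkpoint keys traced through the
# replacements above):
#   "patch_embed.proj.weight" -> "focalnet.embeddings.patch_embeddings.projection.weight"
#   "norm.weight"             -> "focalnet.layernorm.weight"
#   "head.bias"               -> "classifier.bias"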
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> List[str]:
# fmt: off
__lowercase = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__lowercase = model_name_to_url[model_name]
print('Checkpoint URL: ' , SCREAMING_SNAKE_CASE )
__lowercase = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE , map_location='cpu' )['model']
# rename keys
for key in state_dict.copy().keys():
__lowercase = state_dict.pop(SCREAMING_SNAKE_CASE )
__lowercase = val
__lowercase = get_focalnet_config(SCREAMING_SNAKE_CASE )
__lowercase = FocalNetForImageClassification(SCREAMING_SNAKE_CASE )
model.eval()
# load state dict
model.load_state_dict(SCREAMING_SNAKE_CASE )
# verify conversion
__lowercase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowercase = BitImageProcessor(
do_resize=SCREAMING_SNAKE_CASE , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE , crop_size=224 , do_normalize=SCREAMING_SNAKE_CASE , image_mean=SCREAMING_SNAKE_CASE , image_std=SCREAMING_SNAKE_CASE , )
__lowercase = Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
__lowercase = processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' )
__lowercase = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__lowercase = image_transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , SCREAMING_SNAKE_CASE , atol=1E-4 )
__lowercase = model(**SCREAMING_SNAKE_CASE )
__lowercase = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowercase = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
__lowercase = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
__lowercase = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
__lowercase = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
__lowercase = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
__lowercase = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
if push_to_hub:
print(F"""Pushing model and processor of {model_name} to the hub...""" )
model.push_to_hub(F"""{model_name}""" )
processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 325 | 0 |
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class SCREAMING_SNAKE_CASE_ ( lowerCAmelCase__ , unittest.TestCase ):
__magic_name__: int = DebertaTokenizer
__magic_name__: Any = True
__magic_name__: int = DebertaTokenizerFast
def UpperCAmelCase_ ( self : List[Any] ) -> str:
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case_ : Dict = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
snake_case_ : Optional[Any] = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
snake_case_ : Union[str, Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
snake_case_ : Any = {'unk_token': '[UNK]'}
snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
snake_case_ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_UpperCAmelCase ) )
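    # The toy BPE above applies the merge rules in order: "\u0120 l" fuses the
    # leading-space marker with "l", "\u0120l o" and "\u0120lo w" build "\u0120low",
    # and "e r" builds "er" -- so " lower" tokenizes to ["\u0120low", "er"].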
def UpperCAmelCase_ ( self : Union[str, Any] , **_A : Any ) -> Optional[Any]:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def UpperCAmelCase_ ( self : Optional[Any] , _A : Tuple ) -> Tuple:
"""simple docstring"""
snake_case_ : Tuple = 'lower newer'
snake_case_ : int = 'lower newer'
return input_text, output_text
def UpperCAmelCase_ ( self : List[Any] ) -> Any:
"""simple docstring"""
snake_case_ : List[Any] = self.get_tokenizer()
snake_case_ : Union[str, Any] = 'lower newer'
snake_case_ : int = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
snake_case_ : Union[str, Any] = tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
snake_case_ : List[Any] = tokens + [tokenizer.unk_token]
snake_case_ : str = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , _UpperCAmelCase )
def UpperCAmelCase_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ : Optional[Any] = self.get_tokenizer()
snake_case_ : Dict = tokenizer('Hello' , 'World' )
snake_case_ : Tuple = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , _UpperCAmelCase )
@slow
def UpperCAmelCase_ ( self : int ) -> int:
"""simple docstring"""
snake_case_ : Optional[int] = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
snake_case_ : Optional[Any] = tokenizer.encode('sequence builders' , add_special_tokens=_UpperCAmelCase )
snake_case_ : Optional[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=_UpperCAmelCase )
snake_case_ : Dict = tokenizer.encode(
'sequence builders' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
snake_case_ : Union[str, Any] = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=_UpperCAmelCase , add_prefix_space=_UpperCAmelCase )
snake_case_ : Any = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase )
snake_case_ : Tuple = tokenizer.build_inputs_with_special_tokens(_UpperCAmelCase , _UpperCAmelCase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def UpperCAmelCase_ ( self : int ) -> str:
"""simple docstring"""
snake_case_ : str = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
snake_case_ : Optional[int] = tokenizer_class.from_pretrained('microsoft/deberta-base' )
snake_case_ : Tuple = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
snake_case_ : Optional[Any] = tokenizer(_UpperCAmelCase , padding=_UpperCAmelCase )
snake_case_ : str = [tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase ) for seq in encoding['input_ids']]
# fmt: off
snake_case_ : Optional[int] = {
'input_ids': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
snake_case_ : List[Any] = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , _UpperCAmelCase )
for expected, decoded in zip(_UpperCAmelCase , _UpperCAmelCase ):
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
| 327 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Tuple = "mask2former"
lowerCAmelCase__ : List[Any] = ["swin"]
lowerCAmelCase__ : str = {"hidden_size": "hidden_dim"}
def __init__( self : Optional[int] , _UpperCAmelCase : Optional[Dict] = None , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 2_56 , _UpperCAmelCase : int = 10_24 , _UpperCAmelCase : str = "relu" , _UpperCAmelCase : int = 6 , _UpperCAmelCase : int = 10 , _UpperCAmelCase : int = 8 , _UpperCAmelCase : float = 0.0 , _UpperCAmelCase : int = 20_48 , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = False , _UpperCAmelCase : int = 4 , _UpperCAmelCase : int = 2_55 , _UpperCAmelCase : int = 1_00 , _UpperCAmelCase : float = 0.1 , _UpperCAmelCase : float = 2.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : float = 5.0 , _UpperCAmelCase : int = 1_25_44 , _UpperCAmelCase : float = 3.0 , _UpperCAmelCase : float = 0.75 , _UpperCAmelCase : float = 0.02 , _UpperCAmelCase : float = 1.0 , _UpperCAmelCase : bool = True , _UpperCAmelCase : List[int] = [4, 8, 16, 32] , _UpperCAmelCase : bool = None , **_UpperCAmelCase : List[str] , ) -> int:
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
__lowercase = CONFIG_MAPPING['swin'](
image_size=2_24 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=_UpperCAmelCase , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = backbone_config.pop('model_type' )
__lowercase = CONFIG_MAPPING[backbone_model_type]
__lowercase = config_class.from_dict(_UpperCAmelCase )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
__lowercase = backbone_config
__lowercase = feature_size
__lowercase = mask_feature_size
__lowercase = hidden_dim
__lowercase = encoder_feedforward_dim
__lowercase = activation_function
__lowercase = encoder_layers
__lowercase = decoder_layers
__lowercase = num_attention_heads
__lowercase = dropout
__lowercase = dim_feedforward
__lowercase = pre_norm
__lowercase = enforce_input_projection
__lowercase = common_stride
__lowercase = ignore_value
__lowercase = num_queries
__lowercase = no_object_weight
__lowercase = class_weight
__lowercase = mask_weight
__lowercase = dice_weight
__lowercase = train_num_points
__lowercase = oversample_ratio
__lowercase = importance_sample_ratio
__lowercase = init_std
__lowercase = init_xavier_std
__lowercase = use_auxiliary_loss
__lowercase = feature_strides
__lowercase = output_auxiliary_logits
__lowercase = decoder_layers
super().__init__(**_UpperCAmelCase )
@classmethod
def a__ ( cls : Union[str, Any] , _UpperCAmelCase : PretrainedConfig , **_UpperCAmelCase : Optional[int] ) -> Dict:
"""simple docstring"""
return cls(
backbone_config=_UpperCAmelCase , **_UpperCAmelCase , )
def a__ ( self : str ) -> Dict[str, any]:
"""simple docstring"""
__lowercase = copy.deepcopy(self.__dict__ )
__lowercase = self.backbone_config.to_dict()
__lowercase = self.__class__.model_type
return output
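# Minimal usage sketch (hedged: using the upstream name Mask2FormerConfig for the
# class defined above as A__):
#   config = Mask2FormerConfig()                 # falls back to the default Swin backbone
#   assert config.backbone_config.model_type == "swin"
#   serialized = config.to_dict()                # nests the backbone config for round-tripping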
| 325 | 0 |
from itertools import zip_longest
import requests
from bsa import BeautifulSoup
from pandas import DataFrame
def lowerCamelCase_ ( _UpperCamelCase = "laptop" ) -> DataFrame:
"""simple docstring"""
snake_case_ : Any = f'''https://www.amazon.in/laptop/s?k={product}'''
snake_case_ : List[str] = {
        '''User-Agent''': '''Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36''',
'''Accept-Language''': '''en-US, en;q=0.5''',
}
snake_case_ : Optional[Any] = BeautifulSoup(requests.get(_UpperCamelCase , headers=_UpperCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
snake_case_ : Tuple = DataFrame(
columns=[
'''Product Title''',
'''Product Link''',
'''Current Price of the product''',
'''Product Rating''',
'''MRP of the product''',
'''Discount''',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'''div''' , attrs={'''class''': '''s-result-item''', '''data-component-type''': '''s-search-result'''} , ) , soup.find_all('''div''' , attrs={'''class''': '''a-row a-size-base a-color-base'''} ) , ):
try:
snake_case_ : int = item.ha.text
snake_case_ : Any = '''https://www.amazon.in/''' + item.ha.a['''href''']
snake_case_ : int = item.find('''span''' , attrs={'''class''': '''a-offscreen'''} ).text
try:
snake_case_ : str = item.find('''span''' , attrs={'''class''': '''a-icon-alt'''} ).text
except AttributeError:
snake_case_ : int = '''Not available'''
try:
snake_case_ : Tuple = (
'''₹'''
+ item.find(
'''span''' , attrs={'''class''': '''a-price a-text-price'''} ).text.split('''₹''' )[1]
)
except AttributeError:
snake_case_ : List[str] = ''''''
try:
snake_case_ : Dict = float(
(
(
float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
- float(product_price.strip('''₹''' ).replace(''',''' , '''''' ) )
)
/ float(product_mrp.strip('''₹''' ).replace(''',''' , '''''' ) )
)
* 100 )
except ValueError:
snake_case_ : Optional[Any] = float('''nan''' )
except AttributeError:
pass
        data_frame.loc[data_frame.index] = [  # store the scraped row in the dataframe
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
snake_case_ : Any = ''' '''
snake_case_ : Union[str, Any] = ''' '''
data_frame.index += 1
return data_frame
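# Worked example of the discount computed inside the loop (hypothetical values):
# an MRP of ₹1,000 and a current price of ₹750 give
# ((1000.0 - 750.0) / 1000.0) * 100 = 25.0 percent off.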
if __name__ == "__main__":
lowerCAmelCase_ = '''headphones'''
get_amazon_product_data(product).to_csv(F'''Amazon Product Data for {product}.csv''')
| 279 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] ) -> List[str]:
if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
if tokenizer_name is None:
__lowercase = TOKENIZER_CLASSES
else:
__lowercase = {tokenizer_name: getattr(SCREAMING_SNAKE_CASE , tokenizer_name + 'Fast' )}
logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
for tokenizer_name in tokenizer_names:
__lowercase = TOKENIZER_CLASSES[tokenizer_name]
__lowercase = True
if checkpoint_name is None:
__lowercase = list(tokenizer_class.max_model_input_sizes.keys() )
else:
__lowercase = [checkpoint_name]
logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
for checkpoint in checkpoint_names:
logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
# Load tokenizer
__lowercase = tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , force_download=SCREAMING_SNAKE_CASE )
# Save fast tokenizer
logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
# For organization names we create sub-directories
if "/" in checkpoint:
__lowercase , __lowercase = checkpoint.split('/' )
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif add_prefix:
__lowercase = checkpoint
__lowercase = dump_path
else:
__lowercase = None
__lowercase = dump_path
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
__lowercase = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
__lowercase = file_path.split(SCREAMING_SNAKE_CASE )[-1][0]
if next_char == "/":
__lowercase = os.path.join(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowercase = None
logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
__lowercase = tokenizer.save_pretrained(
SCREAMING_SNAKE_CASE , legacy_format=SCREAMING_SNAKE_CASE , filename_prefix=SCREAMING_SNAKE_CASE )
logger.info(F"""=> File names {file_names}""" )
for file_name in file_names:
if not file_name.endswith('tokenizer.json' ):
os.remove(SCREAMING_SNAKE_CASE )
logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
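# Example invocation (hypothetical module path; the flags match the argparse above):
#   python -m transformers.convert_slow_tokenizers_checkpoints_to_fast \
#       --tokenizer_name BertTokenizer --checkpoint_name bert-base-uncased \
#       --dump_path ./fast_tokenizers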
| 325 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class a ( lowerCAmelCase__ ):
_snake_case : int = "audio-spectrogram-transformer"
def __init__( self : Dict , __lowerCAmelCase : Union[str, Any]=768 , __lowerCAmelCase : Union[str, Any]=12 , __lowerCAmelCase : List[Any]=12 , __lowerCAmelCase : Optional[Any]=3072 , __lowerCAmelCase : Optional[int]="gelu" , __lowerCAmelCase : Tuple=0.0 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : Union[str, Any]=0.02 , __lowerCAmelCase : List[str]=1e-1_2 , __lowerCAmelCase : int=16 , __lowerCAmelCase : Optional[Any]=True , __lowerCAmelCase : Dict=10 , __lowerCAmelCase : Tuple=10 , __lowerCAmelCase : List[Any]=1024 , __lowerCAmelCase : Optional[int]=128 , **__lowerCAmelCase : List[Any] , ):
super().__init__(**_UpperCAmelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = patch_size
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = frequency_stride
_UpperCAmelCase = time_stride
_UpperCAmelCase = max_length
_UpperCAmelCase = num_mel_bins
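# Minimal usage sketch (hedged: using the upstream name ASTConfig for the class
# defined above as `a`):
#   config = ASTConfig(num_mel_bins=128, max_length=1024)
#   assert config.model_type == "audio-spectrogram-transformer"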
| 289 |
from math import isqrt, loga
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int ) -> list[int]:
__lowercase = [True] * max_number
for i in range(2 , isqrt(max_number - 1 ) + 1 ):
if is_prime[i]:
for j in range(i**2 , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowercase = False
return [i for i in range(2 , SCREAMING_SNAKE_CASE ) if is_prime[i]]
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : int = 800800 , SCREAMING_SNAKE_CASE : int = 800800 ) -> int:
__lowercase = degree * loga(SCREAMING_SNAKE_CASE )
__lowercase = int(SCREAMING_SNAKE_CASE )
__lowercase = calculate_prime_numbers(SCREAMING_SNAKE_CASE )
__lowercase = 0
__lowercase = 0
__lowercase = len(SCREAMING_SNAKE_CASE ) - 1
while left < right:
while (
prime_numbers[right] * loga(prime_numbers[left] )
+ prime_numbers[left] * loga(prime_numbers[right] )
> upper_bound
):
right -= 1
hybrid_integers_count += right - left
left += 1
return hybrid_integers_count
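# Worked sanity check: the counted hybrid integers have the form p**q * q**p for
# distinct primes p < q, e.g. 2**5 * 5**2 = 32 * 25 = 800. The two-pointer loop
# compares q*log2(p) + p*log2(q) against degree*log2(base), which is equivalent to
# p**q * q**p <= base**degree without ever materialising the huge numbers.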
if __name__ == "__main__":
print(F'''{solution() = }''')
| 325 | 0 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def UpperCAmelCase_( a__ , a__ , a__ = 10**-10 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = a
while True:
SCREAMING_SNAKE_CASE : Tuple = Decimal(a__ ) - (
Decimal(eval(a__ ) ) / Decimal(eval(str(diff(a__ ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(a__ ) ) < precision: # noqa: S307
return float(a__ )
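# The loop above is the classic Newton-Raphson update x_{n+1} = x_n - f(x_n) / f'(x_n),
# with sympy's diff() supplying f' symbolically; iteration stops once |f(x_n)| < precision.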
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(F"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find Square Root of 5
print(F"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(F"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 313 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
SCREAMING_SNAKE_CASE__ = 5
SCREAMING_SNAKE_CASE__ = 10
@require_sentencepiece
@require_tokenizers
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : Optional[Any] = SpeechaTextTokenizer
lowerCAmelCase__ : Any = False
lowerCAmelCase__ : List[Any] = True
def a__ ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
__lowercase = sp.SentencePieceProcessor()
spm_model.Load(_UpperCAmelCase )
__lowercase = ['<s>', '<pad>', '</s>', '<unk>']
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(_UpperCAmelCase ) )]
__lowercase = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
__lowercase = Path(self.tmpdirname )
save_json(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(_UpperCAmelCase , save_dir / VOCAB_FILES_NAMES['spm_file'] )
__lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = '<pad>'
__lowercase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) , _UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(_UpperCAmelCase ) , 10_01 )
def a__ ( self : int ) -> Optional[Any]:
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
def a__ ( self : Optional[Any] ) -> str:
"""simple docstring"""
__lowercase = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
__lowercase = tokenizer.tokenize('This is a test' )
self.assertListEqual(_UpperCAmelCase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) , [2_89, 50, 14, 1_74, 3_86] , )
__lowercase = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
__lowercase = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
__lowercase = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertListEqual(
_UpperCAmelCase , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class A__ ( unittest.TestCase ):
lowerCAmelCase__ : str = "valhalla/s2t_mustc_multilinguial_medium"
lowerCAmelCase__ : Dict = "C'est trop cool"
lowerCAmelCase__ : List[Any] = "Esto es genial"
@classmethod
def a__ ( cls : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 )
def a__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
def a__ ( self : str ) -> int:
"""simple docstring"""
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids )
__lowercase = [ES_CODE, 4, 16_01, 47, 76_47, 2]
__lowercase = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
__lowercase = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'fr'
__lowercase = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , _UpperCAmelCase )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def a__ ( self : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = 'fr'
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
__lowercase = 'es'
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 325 | 0 |
def _A ( SCREAMING_SNAKE_CASE__ : int ):
return number & 1 == 0
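# The bitwise check works because the least-significant bit of an even number is 0:
# 6 = 0b110 -> 6 & 1 == 0 (even), while 7 = 0b111 -> 7 & 1 == 1 (odd).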
if __name__ == "__main__":
import doctest
doctest.testmod()
| 259 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : List[Any] = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=5_02_65 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[int]=30_72 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[int]=1e-5 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Dict=10_24 , _UpperCAmelCase : int=1_28 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : List[Any]=1_28 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : List[Any]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[int]=2_24 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
__lowercase = max_ad_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
__lowercase = rel_ad_pos_bins
__lowercase = max_rel_ad_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : int = version.parse("1.12" )
@property
def a__ ( self : int ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
@property
def a__ ( self : int ) -> float:
"""simple docstring"""
return 1e-5
@property
def a__ ( self : str ) -> int:
"""simple docstring"""
return 12
def a__ ( self : str , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
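# Sketch of what the method above returns (hedged, shapes are indicative only): with
# the default fixed batch of 2 samples and 8 tokens plus special tokens per sample,
# the processor yields input_ids/attention_mask/bbox of matching sequence length and
# pixel_values sized by num_channels/image_height/image_width, lining up with the
# ONNX axes declared in the `inputs` property.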
| 325 | 0 |
"""simple docstring"""
from __future__ import annotations
class _UpperCAmelCase :
def __init__( self : Optional[Any] , _lowercase : int = 0 ):
__UpperCAmelCase = key
def a ( self : Optional[int] , _lowercase : str , _lowercase : int ):
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase )
__UpperCAmelCase = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(_UpperCAmelCase ) ^ key ) for ch in content]
def a ( self : int , _lowercase : str , _lowercase : int ):
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase )
__UpperCAmelCase = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(_UpperCAmelCase ) ^ key ) for ch in content]
def a ( self : str , _lowercase : str , _lowercase : int = 0 ):
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase )
__UpperCAmelCase = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
__UpperCAmelCase = ''''''
for ch in content:
ans += chr(ord(_UpperCAmelCase ) ^ key )
return ans
def a ( self : Dict , _lowercase : str , _lowercase : int = 0 ):
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase )
__UpperCAmelCase = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
__UpperCAmelCase = ''''''
for ch in content:
ans += chr(ord(_UpperCAmelCase ) ^ key )
return ans
def a ( self : List[str] , _lowercase : str , _lowercase : int = 0 ):
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase )
try:
with open(_UpperCAmelCase ) as fin, open('''encrypt.out''' , '''w+''' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(_UpperCAmelCase , _UpperCAmelCase ) )
except OSError:
return False
return True
def a ( self : Optional[int] , _lowercase : str , _lowercase : int ):
assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) and isinstance(_UpperCAmelCase , _UpperCAmelCase )
try:
with open(_UpperCAmelCase ) as fin, open('''decrypt.out''' , '''w+''' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(_UpperCAmelCase , _UpperCAmelCase ) )
except OSError:
return False
return True
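# Note on correctness: XOR is an involution, (x ^ key) ^ key == x, so the encrypt and
# decrypt methods above can safely share the same body.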
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 332 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE__ = """RegNetConfig"""
# Base docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = [1, 1088, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE__ = """facebook/regnet-y-040"""
SCREAMING_SNAKE_CASE__ = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE__ = [
"""facebook/regnet-y-040""",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A__ ( nn.Module ):
def __init__( self : str , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : int = 1 , _UpperCAmelCase : Optional[str] = "relu" , ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__lowercase = nn.Convad(
_UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , groups=_UpperCAmelCase , bias=_UpperCAmelCase , )
__lowercase = nn.BatchNormad(_UpperCAmelCase )
__lowercase = ACTaFN[activation] if activation is not None else nn.Identity()
def a__ ( self : Tuple , _UpperCAmelCase : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.convolution(_UpperCAmelCase )
__lowercase = self.normalization(_UpperCAmelCase )
__lowercase = self.activation(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig ) -> Any:
"""simple docstring"""
super().__init__()
__lowercase = RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
__lowercase = config.num_channels
def a__ ( self : Optional[Any] , _UpperCAmelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
__lowercase = self.embedder(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__lowercase = nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , stride=_UpperCAmelCase , bias=_UpperCAmelCase )
__lowercase = nn.BatchNormad(_UpperCAmelCase )
def a__ ( self : int , _UpperCAmelCase : Tensor ) -> Tensor:
"""simple docstring"""
__lowercase = self.convolution(_UpperCAmelCase )
__lowercase = self.normalization(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : int , _UpperCAmelCase : int , _UpperCAmelCase : int ) -> str:
"""simple docstring"""
super().__init__()
__lowercase = nn.AdaptiveAvgPoolad((1, 1) )
__lowercase = nn.Sequential(
nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.Sigmoid() , )
def a__ ( self : str , _UpperCAmelCase : Dict ) -> str:
"""simple docstring"""
__lowercase = self.pooler(_UpperCAmelCase )
__lowercase = self.attention(_UpperCAmelCase )
__lowercase = hidden_state * attention
return hidden_state
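# The layer above is squeeze-and-excitation: adaptive average pooling squeezes each
# channel map to a single value, the two 1x1 convolutions (ReLU then Sigmoid) turn
# those values into per-channel gates in (0, 1), and the input is rescaled by them.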
class A__ ( nn.Module ):
def __init__( self : Optional[int] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Tuple:
"""simple docstring"""
super().__init__()
__lowercase = in_channels != out_channels or stride != 1
__lowercase = max(1 , out_channels // config.groups_width )
__lowercase = (
RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
__lowercase = nn.Sequential(
RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , )
__lowercase = ACTaFN[config.hidden_act]
def a__ ( self : List[str] , _UpperCAmelCase : Tuple ) -> List[Any]:
"""simple docstring"""
__lowercase = hidden_state
__lowercase = self.layer(_UpperCAmelCase )
__lowercase = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__lowercase = self.activation(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : Union[str, Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 1 ) -> Optional[Any]:
"""simple docstring"""
super().__init__()
__lowercase = in_channels != out_channels or stride != 1
__lowercase = max(1 , out_channels // config.groups_width )
__lowercase = (
RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
__lowercase = nn.Sequential(
RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , )
__lowercase = ACTaFN[config.hidden_act]
def a__ ( self : Tuple , _UpperCAmelCase : Any ) -> List[str]:
"""simple docstring"""
__lowercase = hidden_state
__lowercase = self.layer(_UpperCAmelCase )
__lowercase = self.shortcut(_UpperCAmelCase )
hidden_state += residual
__lowercase = self.activation(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : List[Any] , _UpperCAmelCase : RegNetConfig , _UpperCAmelCase : int , _UpperCAmelCase : int , _UpperCAmelCase : int = 2 , _UpperCAmelCase : int = 2 , ) -> Dict:
"""simple docstring"""
super().__init__()
__lowercase = RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
__lowercase = nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , ) , *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for _ in range(depth - 1 )] , )
def a__ ( self : Any , _UpperCAmelCase : str ) -> int:
"""simple docstring"""
__lowercase = self.layers(_UpperCAmelCase )
return hidden_state
class A__ ( nn.Module ):
def __init__( self : Any , _UpperCAmelCase : RegNetConfig ) -> int:
"""simple docstring"""
super().__init__()
__lowercase = nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
__lowercase = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_UpperCAmelCase , config.depths[1:] ):
self.stages.append(RegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase ) )
def a__ ( self : int , _UpperCAmelCase : Tensor , _UpperCAmelCase : bool = False , _UpperCAmelCase : bool = True ) -> BaseModelOutputWithNoAttention:
"""simple docstring"""
__lowercase = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
__lowercase = hidden_states + (hidden_state,)
__lowercase = stage_module(_UpperCAmelCase )
if output_hidden_states:
__lowercase = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Optional[Any] = RegNetConfig
lowerCAmelCase__ : Optional[int] = "regnet"
lowerCAmelCase__ : Dict = "pixel_values"
lowerCAmelCase__ : List[str] = True
def a__ ( self : Any , _UpperCAmelCase : Any ) -> Dict:
"""simple docstring"""
if isinstance(_UpperCAmelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
elif isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def a__ ( self : Any , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any]=False ) -> Dict:
"""simple docstring"""
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__lowercase = value
SCREAMING_SNAKE_CASE__ = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE__ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"The bare RegNet model outputting raw features without any specific head on top." , lowerCAmelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class A__ ( lowerCAmelCase__ ):
def __init__( self : List[Any] , _UpperCAmelCase : Any ) -> str:
"""simple docstring"""
super().__init__(_UpperCAmelCase )
__lowercase = config
__lowercase = RegNetEmbeddings(_UpperCAmelCase )
__lowercase = RegNetEncoder(_UpperCAmelCase )
__lowercase = nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def a__ ( self : Tuple , _UpperCAmelCase : Tensor , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None ) -> BaseModelOutputWithPoolingAndNoAttention:
"""simple docstring"""
__lowercase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase = self.embedder(_UpperCAmelCase )
__lowercase = self.encoder(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
__lowercase = encoder_outputs[0]
__lowercase = self.pooler(_UpperCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n " , lowerCAmelCase__ , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class A__ ( lowerCAmelCase__ ):
def __init__( self : str , _UpperCAmelCase : List[Any] ) -> Tuple:
"""simple docstring"""
super().__init__(_UpperCAmelCase )
__lowercase = config.num_labels
__lowercase = RegNetModel(_UpperCAmelCase )
# classification head
__lowercase = nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def a__ ( self : List[Any] , _UpperCAmelCase : Optional[torch.FloatTensor] = None , _UpperCAmelCase : Optional[torch.LongTensor] = None , _UpperCAmelCase : Optional[bool] = None , _UpperCAmelCase : Optional[bool] = None , ) -> ImageClassifierOutputWithNoAttention:
"""simple docstring"""
__lowercase = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase = self.regnet(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
__lowercase = outputs.pooler_output if return_dict else outputs[1]
__lowercase = self.classifier(_UpperCAmelCase )
__lowercase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
__lowercase = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
__lowercase = 'single_label_classification'
else:
__lowercase = 'multi_label_classification'
if self.config.problem_type == "regression":
__lowercase = MSELoss()
if self.num_labels == 1:
__lowercase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
__lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
__lowercase = CrossEntropyLoss()
__lowercase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
__lowercase = BCEWithLogitsLoss()
__lowercase = loss_fct(_UpperCAmelCase , _UpperCAmelCase )
if not return_dict:
__lowercase = (logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states )
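# --- Hedged usage sketch (editor's addition) ---
# End-to-end classification with the model above, assuming the standard
# `transformers` RegNet API (`AutoImageProcessor`, `RegNetForImageClassification`)
# and the public checkpoint "facebook/regnet-y-040"; these names come from the
# upstream library, not from this file.
from PIL import Image
import requests
import torch
from transformers import AutoImageProcessor, RegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])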
| 325 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class lowerCAmelCase_ ( lowerCAmelCase__ ):
__lowerCamelCase : int = "xlm-roberta"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCAmelCase_ ( lowerCAmelCase__ ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
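# --- Hedged usage sketch (editor's addition) ---
# Constructing the configuration defined above through the standard
# `transformers` API that this file mirrors (`XLMRobertaConfig` is an
# upstream name, not defined in this file).
from transformers import XLMRobertaConfig

xlmr_config = XLMRobertaConfig(vocab_size=30522, hidden_size=768, num_hidden_layers=12)
print(xlmr_config.hidden_size)  # 768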
| 158 |
from __future__ import annotations
def __SCREAMING_SNAKE_CASE ( matrix : list[list[int]] ) -> int:
    # preprocessing the first row
    for i in range(1 , len(matrix[0] ) ):
        matrix[0][i] += matrix[0][i - 1]
    # preprocessing the first column
    for i in range(1 , len(matrix ) ):
        matrix[i][0] += matrix[i - 1][0]
    # updating the path cost for current position
    for i in range(1 , len(matrix ) ):
        for j in range(1 , len(matrix[0] ) ):
            matrix[i][j] += min(matrix[i - 1][j] , matrix[i][j - 1] )
    return matrix[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
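# --- Worked example (editor's addition) ---
# The function above computes the minimum path sum from the top-left to the
# bottom-right of a grid, moving only right or down; note it mutates `matrix`
# in place.
#     __SCREAMING_SNAKE_CASE([[1, 3, 1], [1, 5, 1], [4, 2, 1]])  # returns 7
# (path 1 -> 3 -> 1 -> 1 -> 1).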
| 325 | 0 |
"""simple docstring"""
def longest_distance ( graph ) -> None:
    '''simple docstring'''
    indegree = [0] * len(graph )
    queue = []
    long_dist = [1] * len(graph )
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph ) ):
        if indegree[i] == 0:
            queue.append(i )
    while queue:
        vertex = queue.pop(0 )
        for x in graph[vertex]:
            indegree[x] -= 1
            if long_dist[vertex] + 1 > long_dist[x]:
                long_dist[x] = long_dist[vertex] + 1
            if indegree[x] == 0:
                queue.append(x )
    print(max(long_dist ) )
# Adjacency list of Graph
graph = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
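# For the adjacency list above the longest path is 0 -> 2 -> 5 -> 6 -> 7,
# so longest_distance(graph) prints 5 (length counted in vertices).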
| 17 |
import enum
import os
from hashlib import sha256
from typing import Optional
from .. import config
from .logging import get_logger
logger = get_logger(__name__)
class VerificationMode ( enum.Enum ):
    ALL_CHECKS = 'all_checks'
    BASIC_CHECKS = 'basic_checks'
    NO_CHECKS = 'no_checks'
class ChecksumVerificationException ( Exception ):
pass
class UnexpectedDownloadedFile ( ChecksumVerificationException ):
pass
class ExpectedMoreDownloadedFiles ( ChecksumVerificationException ):
pass
class NonMatchingChecksumError ( ChecksumVerificationException ):
pass
def verify_checksums ( expected_checksums : Optional[dict] , recorded_checksums : dict , verification_name=None ) -> None:
    if expected_checksums is None:
        logger.info('Unable to verify checksums.' )
        return
    if len(set(expected_checksums ) - set(recorded_checksums ) ) > 0:
        raise ExpectedMoreDownloadedFiles(str(set(expected_checksums ) - set(recorded_checksums ) ) )
    if len(set(recorded_checksums ) - set(expected_checksums ) ) > 0:
        raise UnexpectedDownloadedFile(str(set(recorded_checksums ) - set(expected_checksums ) ) )
    bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
    for_verification_name = ' for ' + verification_name if verification_name is not None else ''
    if len(bad_urls ) > 0:
        raise NonMatchingChecksumError(
            f"""Checksums didn't match{for_verification_name}:\n"""
            f"""{bad_urls}\n"""
            'Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error' )
    logger.info('All the checksums matched successfully' + for_verification_name )
class SplitsVerificationException ( Exception ):
pass
class UnexpectedSplits ( SplitsVerificationException ):
pass
class ExpectedMoreSplits ( SplitsVerificationException ):
pass
class NonMatchingSplitsSizesError ( SplitsVerificationException ):
pass
def verify_splits ( expected_splits : Optional[dict] , recorded_splits : dict ) -> None:
    if expected_splits is None:
        logger.info('Unable to verify splits sizes.' )
        return
    if len(set(expected_splits ) - set(recorded_splits ) ) > 0:
        raise ExpectedMoreSplits(str(set(expected_splits ) - set(recorded_splits ) ) )
    if len(set(recorded_splits ) - set(expected_splits ) ) > 0:
        raise UnexpectedSplits(str(set(recorded_splits ) - set(expected_splits ) ) )
    bad_splits = [
        {'expected': expected_splits[name], 'recorded': recorded_splits[name]}
        for name in expected_splits
        if expected_splits[name].num_examples != recorded_splits[name].num_examples
    ]
    if len(bad_splits ) > 0:
        raise NonMatchingSplitsSizesError(str(bad_splits ) )
    logger.info('All the splits matched successfully.' )
def get_size_checksum_dict ( path : str , record_checksum : bool = True ) -> dict:
    if record_checksum:
        m = sha256()
        with open(path , 'rb' ) as f:
            for chunk in iter(lambda: f.read(1 << 20 ) , b'' ):
                m.update(chunk )
        checksum = m.hexdigest()
    else:
        checksum = None
    return {"num_bytes": os.path.getsize(path ), "checksum": checksum}
def is_small_dataset ( dataset_size : Optional[int] ) -> bool:
    if dataset_size and config.IN_MEMORY_MAX_SIZE:
        return dataset_size < config.IN_MEMORY_MAX_SIZE
    else:
        return False
| 325 | 0 |
import os
def solution():
    with open(os.path.dirname(__file__) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution())
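# Worked example of the scoring rule above (from the problem statement):
# once the list is sorted, "COLIN" is the 938th name; its letter value is
# 3 + 15 + 12 + 9 + 14 = 53, so it contributes 938 * 53 = 49714 to the total.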
| 195 |
import math
def is_prime ( number : int ) -> bool:
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False
    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )
def next_prime ( value , factor=1 , **kwargs ):
    value = factor * value
    first_value_val = value
    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
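# --- Worked examples (editor's addition) ---
# is_prime(2) -> True, is_prime(15) -> False, is_prime(97) -> True
# next_prime(14) -> 17 (scans upward; pass desc=True to scan downward).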
| 325 | 0 |
from numpy import exp, pi, sqrt
def a_ ( x : Any , mu : float = 0.0 , sigma : float = 1.0 ) -> float:
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
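# Worked check (editor's addition): at x = mu = 0 with sigma = 1 the density is
# 1 / sqrt(2 * pi) ≈ 0.3989422804014327, i.e. a_(0) ≈ 0.3989.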
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 282 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : int , **_UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : int ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
        input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def a__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [torch.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , torch.tensor(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(_UpperCAmelCase ):
__lowercase = processor.post_process_masks(_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) )
@require_vision
@require_tf
class A__ ( unittest.TestCase ):
def a__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : str , **_UpperCAmelCase : Tuple ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
__lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowercase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
__lowercase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def a__ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='np' )
__lowercase = processor(images=_UpperCAmelCase , return_tensors='np' )
input_feat_extract.pop('original_sizes' ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop('reshaped_input_sizes' ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = [tf.ones((1, 3, 5, 5) )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , tf.convert_to_tensor(_UpperCAmelCase ) , tf.convert_to_tensor(_UpperCAmelCase ) , return_tensors='tf' , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowercase = [np.ones((1, 3, 5, 5) )]
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowercase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__lowercase = processor.post_process_masks(
_UpperCAmelCase , np.array(_UpperCAmelCase ) , np.array(_UpperCAmelCase ) , return_tensors='tf' )
@require_vision
@require_torchvision
class A__ ( unittest.TestCase ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = SamImageProcessor()
__lowercase = SamProcessor(_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def a__ ( self : Dict , **_UpperCAmelCase : int ) -> Optional[Any]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase ).image_processor
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowercase = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
__lowercase = [tf.convert_to_tensor(_UpperCAmelCase )]
__lowercase = [torch.tensor(_UpperCAmelCase )]
__lowercase = [[17_64, 26_46]]
__lowercase = [[6_83, 10_24]]
__lowercase = processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='tf' )
__lowercase = processor.post_process_masks(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , return_tensors='pt' )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def a__ ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = SamProcessor(image_processor=_UpperCAmelCase )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
__lowercase = processor(images=_UpperCAmelCase , return_tensors='pt' )['pixel_values'].numpy()
__lowercase = image_processor(_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
__lowercase = processor(images=_UpperCAmelCase , return_tensors='tf' )['pixel_values'].numpy()
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
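# --- Hedged usage sketch (editor's addition) ---
# A minimal non-test use of the processor exercised above, assuming the
# standard `transformers` SAM API and the public checkpoint
# "facebook/sam-vit-base"; `image` and `outputs` are placeholders.
# from transformers import SamProcessor
# processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
# inputs = processor(images=image, input_points=[[[450, 600]]], return_tensors="pt")
# masks = processor.post_process_masks(
#     outputs.pred_masks, inputs["original_sizes"], inputs["reshaped_input_sizes"]
# )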
| 325 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    '''configuration_mobilebert''': [
        '''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''MobileBertConfig''',
        '''MobileBertOnnxConfig''',
    ],
    '''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_mobilebert_fast'''] = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_mobilebert'''] = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_mobilebert'''] = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
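# Editor's note (hedged): `_import_structure` maps each submodule to its public
# names so `_LazyModule` can defer the heavy torch/tensorflow imports until an
# attribute is first accessed; at runtime the module object in `sys.modules`
# is replaced by the lazy proxy.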
| 340 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["""BartphoTokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 325 | 0 |
import comet # From: unbabel-comet
import torch
import datasets
_SCREAMING_SNAKE_CASE = datasets.logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = """\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel's Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = \"{COMET}: A Neural Framework for {MT} Evaluation\",
author = \"Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon\",
booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",
month = nov,
year = \"2020\",
address = \"Online\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",
pages = \"2685--2702\",
}
"""
_SCREAMING_SNAKE_CASE = """\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
"""
_SCREAMING_SNAKE_CASE = """
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric('comet')
>>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use
>>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]
>>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]
>>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results[\"scores\"]])
[0.19, 0.92]
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE_ ( datasets.Metric ):
    def _info ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://unbabel.github.io/COMET/html/index.html' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'sources': datasets.Value('string' , id='sequence' ),
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/Unbabel/COMET'] , reference_urls=[
'https://github.com/Unbabel/COMET',
'https://www.aclweb.org/anthology/2020.emnlp-main.213/',
'http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6',
] , )
    def _download_and_prepare ( self , dl_manager ):
        """simple docstring"""
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model('wmt20-comet-da' ) )
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
    def _compute ( self , sources , predictions , references , gpus=None , progress_bar=False ):
        """simple docstring"""
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {'src': sources, 'mt': predictions, 'ref': references}
        data = [dict(zip(data , t ) ) for t in zip(*data.values() )]
        scores , mean_score = self.scorer.predict(data , gpus=gpus , progress_bar=progress_bar )
return {"mean_score": mean_score, "scores": scores}
| 327 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""transfo-xl-wt103""": """https://huggingface.co/transfo-xl-wt103/resolve/main/config.json""",
}
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = "transfo-xl"
lowerCAmelCase__ : int = ["mems"]
lowerCAmelCase__ : Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[int] , _UpperCAmelCase : Tuple=26_77_35 , _UpperCAmelCase : Any=[2_00_00, 4_00_00, 20_00_00] , _UpperCAmelCase : Tuple=10_24 , _UpperCAmelCase : Union[str, Any]=10_24 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : Tuple=40_96 , _UpperCAmelCase : List[Any]=4 , _UpperCAmelCase : str=False , _UpperCAmelCase : Optional[Any]=18 , _UpperCAmelCase : int=16_00 , _UpperCAmelCase : Optional[int]=10_00 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Tuple=True , _UpperCAmelCase : Any=0 , _UpperCAmelCase : Optional[Any]=-1 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : Optional[Any]=0.1 , _UpperCAmelCase : List[str]=0.0 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : int="normal" , _UpperCAmelCase : int=0.01 , _UpperCAmelCase : List[Any]=0.01 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Optional[Any]=1e-5 , _UpperCAmelCase : Tuple=0 , **_UpperCAmelCase : List[str] , ) -> Tuple:
"""simple docstring"""
__lowercase = vocab_size
__lowercase = []
self.cutoffs.extend(_UpperCAmelCase )
if proj_share_all_but_first:
__lowercase = [False] + [True] * len(self.cutoffs )
else:
__lowercase = [False] + [False] * len(self.cutoffs )
__lowercase = d_model
__lowercase = d_embed
__lowercase = d_head
__lowercase = d_inner
__lowercase = div_val
__lowercase = pre_lnorm
__lowercase = n_layer
__lowercase = n_head
__lowercase = mem_len
__lowercase = same_length
__lowercase = attn_type
__lowercase = clamp_len
__lowercase = sample_softmax
__lowercase = adaptive
__lowercase = dropout
__lowercase = dropatt
__lowercase = untie_r
__lowercase = init
__lowercase = init_range
__lowercase = proj_init_std
__lowercase = init_std
__lowercase = layer_norm_epsilon
super().__init__(eos_token_id=_UpperCAmelCase , **_UpperCAmelCase )
@property
    def max_position_embeddings ( self : Tuple ) -> Any:
"""simple docstring"""
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
    def max_position_embeddings ( self : Dict , _UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
| 325 | 0 |
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path ( suffix="" ) -> str:
    """simple docstring"""
    tmp_dir = tempfile.mkdtemp()
    return os.path.join(tmp_dir , str(uuid.uuid4() ) + suffix )
@require_soundfile
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase (self ) -> Any:
'''simple docstring'''
snake_case_ : str = torch.rand(12 , dtype=torch.floataa ) - 0.5
snake_case_ : Tuple = AgentAudio(_UpperCAmelCase )
snake_case_ : Any = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_UpperCAmelCase , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(_UpperCAmelCase ) )
# Ensure that the file contains the same value as the original tensor
snake_case_ , snake_case_ : List[str] = sf.read(_UpperCAmelCase )
self.assertTrue(torch.allclose(_UpperCAmelCase , torch.tensor(_UpperCAmelCase ) , atol=1e-4 ) )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Union[str, Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5
snake_case_ : Dict = get_new_path(suffix='''.wav''' )
sf.write(_UpperCAmelCase , _UpperCAmelCase , 1_6000 )
snake_case_ : Any = AgentAudio(_UpperCAmelCase )
self.assertTrue(torch.allclose(_UpperCAmelCase , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , _UpperCAmelCase )
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : int = torch.randint(0 , 256 , (64, 64, 3) )
snake_case_ : Tuple = AgentImage(_UpperCAmelCase )
snake_case_ : Dict = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(_UpperCAmelCase , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_UpperCAmelCase ) )
def lowerCamelCase (self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : str = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
snake_case_ : int = Image.open(_UpperCAmelCase )
snake_case_ : Dict = AgentImage(_UpperCAmelCase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_UpperCAmelCase ) )
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
snake_case_ : List[str] = Image.open(_UpperCAmelCase )
snake_case_ : str = AgentImage(_UpperCAmelCase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(_UpperCAmelCase ) )
class __lowerCAmelCase ( unittest.TestCase ):
def lowerCamelCase (self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : int = '''Hey!'''
snake_case_ : List[str] = AgentText(_UpperCAmelCase )
self.assertEqual(_UpperCAmelCase , agent_type.to_string() )
self.assertEqual(_UpperCAmelCase , agent_type.to_raw() )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
| 279 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively ( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def recursively_load_weights_wavaveca ( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    # if encoder has different dim to decoder -> use proj_weight
    proj_weight = None
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
        elif name.split('.' )[0] == "proj":
            proj_weight = fairseq_model.proj
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
                    if "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        weight_type = 'weight'
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
    return proj_weight
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb ( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def create_vocab_dict ( dict_path ):
    with open(dict_path , 'r' , encoding='utf-8' ) as f:
        lines = f.readlines()
        words = [line.split(' ' )[0] for line in lines]
    num_words = len(words )
    vocab_dict = {
        '<s>': 0,
        '<pad>': 1,
        '</s>': 2,
        '<unk>': 3,
    }
    vocab_dict.update(dict(zip(words , range(4 , num_words + 4 ) ) ) )
    return vocab_dict
@torch.no_grad()
def convert_wavaveca_checkpoint ( checkpoint_path , pytorch_dump_folder_path , dict_path , encoder_config_path , decoder_config_path , vocab_size , num_decoder_layers , ):
    encoder_config = WavaVecaConfig.from_pretrained(encoder_config_path )
    decoder_config = SpeechaTextaConfig.from_pretrained(
        decoder_config_path , vocab_size=vocab_size , decoder_layers=num_decoder_layers , do_stable_layer_norm=True )
    feature_extractor = WavaVecaFeatureExtractor(
        feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
    model = model[0].eval()
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    projection_layer = recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    hf_decoder = SpeechaTextaForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    # set output linear layer
    unexpected_keys.remove('embed_out' )
    hf_decoder.lm_head.weight = nn.Parameter(model.decoder.embed_out.detach() )
    # layer norm is init to identity matrix so leaving it is fine
    logger.warning(f"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
    logger.warning(f"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    # add projection layer
    hf_wavavec.enc_to_dec_proj.weight = nn.Parameter(projection_layer.weight )
    hf_wavavec.enc_to_dec_proj.bias = nn.Parameter(projection_layer.bias )
    vocab_dict = create_vocab_dict(dict_path )
    with open(os.path.join(pytorch_dump_folder_path , 'vocab.json' ) , 'w' ) as fp:
        json.dump(vocab_dict , fp )
    tokenizer = SpeechaTextaTokenizer(os.path.join(pytorch_dump_folder_path , 'vocab.json' ) )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config['pad_token_id'] = tokenizer.pad_token_id
    config['bos_token_id'] = tokenizer.bos_token_id
    config['eos_token_id'] = tokenizer.eos_token_id
    config['tokenizer_class'] = 'speech_to_text_2'
    config['feature_extractor_type'] = 'wav2vec2'
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-large-lv60""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/s2t-small-mustc-en-fr-st""",
type=str,
help="""Path to hf decoder s2t checkpoint config""",
)
parser.add_argument("""--vocab_size""", default=1_0224, type=int, help="""Vocab size of decoder""")
parser.add_argument("""--num_decoder_layers""", default=7, type=int, help="""Number of decoder layers""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
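# --- Hedged usage sketch (editor's addition) ---
# Direct (non-CLI) invocation of the converter above; every path below is a
# placeholder, not a real file.
# convert_wavaveca_checkpoint(
#     checkpoint_path="/path/to/wav2vec2_seq2seq_checkpoint.pt",
#     pytorch_dump_folder_path="/path/to/output_dir",
#     dict_path="/path/to/dict.ltr.txt",
#     encoder_config_path="facebook/wav2vec2-large-lv60",
#     decoder_config_path="facebook/s2t-small-mustc-en-fr-st",
#     vocab_size=10224,
#     num_decoder_layers=7,
# )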
| 325 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"""microsoft/swin-tiny-patch4-window7-224""": (
"""https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"""
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class a ( lowerCAmelCase__ , lowerCAmelCase__ ):
_snake_case : Any = "swin"
_snake_case : Union[str, Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : List[Any] , __lowerCAmelCase : int=224 , __lowerCAmelCase : Any=4 , __lowerCAmelCase : Union[str, Any]=3 , __lowerCAmelCase : Optional[Any]=96 , __lowerCAmelCase : str=[2, 2, 6, 2] , __lowerCAmelCase : Optional[Any]=[3, 6, 12, 24] , __lowerCAmelCase : Union[str, Any]=7 , __lowerCAmelCase : int=4.0 , __lowerCAmelCase : Tuple=True , __lowerCAmelCase : Optional[int]=0.0 , __lowerCAmelCase : Any=0.0 , __lowerCAmelCase : List[str]=0.1 , __lowerCAmelCase : str="gelu" , __lowerCAmelCase : Tuple=False , __lowerCAmelCase : str=0.02 , __lowerCAmelCase : Union[str, Any]=1e-5 , __lowerCAmelCase : Optional[Any]=32 , __lowerCAmelCase : Optional[int]=None , __lowerCAmelCase : Optional[Any]=None , **__lowerCAmelCase : Optional[int] , ):
super().__init__(**_UpperCAmelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(_UpperCAmelCase )
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(_UpperCAmelCase ) - 1) )
_UpperCAmelCase = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(_UpperCAmelCase ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=_UpperCAmelCase , out_indices=_UpperCAmelCase , stage_names=self.stage_names )
class a ( lowerCAmelCase__ ):
_snake_case : str = version.parse('1.11' )
@property
    def inputs ( self : List[Any] ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
    def atol_for_validation ( self : int ):
return 1e-4
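# --- Hedged usage sketch (editor's addition) ---
# The hidden size set above is embed_dim * 2 ** (num_stages - 1); shown via the
# standard `transformers.SwinConfig` API that this file mirrors.
from transformers import SwinConfig

swin_config = SwinConfig(embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24])
print(swin_config.hidden_size)  # 768 == 96 * 2 ** 3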
| 289 |
def binomial_coefficient ( n : int , r : int ) -> int:
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
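# Prints 252, i.e. C(10, 5) = 10! / (5! * 5!).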
| 325 | 0 |
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
T = TypeVar('''T''')
class LRUCache ( Generic[T] ):
    """simple docstring"""
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache
    def __init__( self , n ) ->None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('''n should be an integer greater than 0.''' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer ( self , x ) ->None:
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display ( self ) ->None:
        for k in self.dq_store:
            print(k )
    def __repr__( self ) ->str:
        return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
lru_cache = LRUCache(4)
lru_cache.refer('''A''')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('''A''')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 313 |
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Union[str, Any] = ["vqvae"]
def __init__( self : int , _UpperCAmelCase : AutoencoderKL , _UpperCAmelCase : UNetaDConditionModel , _UpperCAmelCase : Mel , _UpperCAmelCase : Union[DDIMScheduler, DDPMScheduler] , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(unet=_UpperCAmelCase , scheduler=_UpperCAmelCase , mel=_UpperCAmelCase , vqvae=_UpperCAmelCase )
    def get_default_steps(self) -> int:
        """Default number of inference steps: 50 for DDIM, 1000 otherwise."""
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000
@torch.no_grad()
def __call__( self : str , _UpperCAmelCase : int = 1 , _UpperCAmelCase : str = None , _UpperCAmelCase : np.ndarray = None , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = 0 , _UpperCAmelCase : int = None , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Generator = None , _UpperCAmelCase : float = 0 , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : torch.Tensor = None , _UpperCAmelCase : str=True , ) -> Union[
Union[AudioPipelineOutput, ImagePipelineOutput],
Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
]:
"""simple docstring"""
__lowercase = steps or self.get_default_steps()
self.scheduler.set_timesteps(_UpperCAmelCase )
__lowercase = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size ) == int:
__lowercase = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
__lowercase = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=_UpperCAmelCase , device=self.device , )
__lowercase = noise
__lowercase = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = self.mel.audio_slice_to_image(_UpperCAmelCase )
__lowercase = np.frombuffer(input_image.tobytes() , dtype='uint8' ).reshape(
(input_image.height, input_image.width) )
__lowercase = (input_image / 2_55) * 2 - 1
__lowercase = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float ).to(self.device )
if self.vqvae is not None:
__lowercase = self.vqvae.encode(torch.unsqueeze(_UpperCAmelCase , 0 ) ).latent_dist.sample(
generator=_UpperCAmelCase )[0]
__lowercase = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
__lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , self.scheduler.timesteps[start_step - 1] )
__lowercase = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
__lowercase = int(mask_start_secs * pixels_per_second )
__lowercase = int(mask_end_secs * pixels_per_second )
__lowercase = self.scheduler.add_noise(_UpperCAmelCase , _UpperCAmelCase , torch.tensor(self.scheduler.timesteps[start_step:] ) )
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:] ) ):
if isinstance(self.unet , _UpperCAmelCase ):
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )['sample']
else:
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']
if isinstance(self.scheduler , _UpperCAmelCase ):
__lowercase = self.scheduler.step(
model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , eta=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']
else:
__lowercase = self.scheduler.step(
model_output=_UpperCAmelCase , timestep=_UpperCAmelCase , sample=_UpperCAmelCase , generator=_UpperCAmelCase , )['prev_sample']
if mask is not None:
if mask_start > 0:
__lowercase = mask[:, step, :, :mask_start]
if mask_end > 0:
__lowercase = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
__lowercase = 1 / self.vqvae.config.scaling_factor * images
__lowercase = self.vqvae.decode(_UpperCAmelCase )['sample']
__lowercase = (images / 2 + 0.5).clamp(0 , 1 )
__lowercase = images.cpu().permute(0 , 2 , 3 , 1 ).numpy()
__lowercase = (images * 2_55).round().astype('uint8' )
__lowercase = list(
(Image.fromarray(_[:, :, 0] ) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(_UpperCAmelCase , mode='RGB' ).convert('L' ) for _ in images) )
__lowercase = [self.mel.image_to_audio(_UpperCAmelCase ) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(_UpperCAmelCase )[:, np.newaxis, :] ) , **ImagePipelineOutput(_UpperCAmelCase ) )
@torch.no_grad()
def a__ ( self : Any , _UpperCAmelCase : List[Image.Image] , _UpperCAmelCase : int = 50 ) -> np.ndarray:
"""simple docstring"""
assert isinstance(self.scheduler , _UpperCAmelCase )
self.scheduler.set_timesteps(_UpperCAmelCase )
__lowercase = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8' ).reshape((1, image.height, image.width) ) for image in images] )
__lowercase = (sample / 2_55) * 2 - 1
__lowercase = torch.Tensor(_UpperCAmelCase ).to(self.device )
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,) ) ):
__lowercase = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
__lowercase = self.scheduler.alphas_cumprod[t]
__lowercase = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
__lowercase = 1 - alpha_prod_t
__lowercase = self.unet(_UpperCAmelCase , _UpperCAmelCase )['sample']
__lowercase = (1 - alpha_prod_t_prev) ** 0.5 * model_output
__lowercase = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
__lowercase = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two (noise) tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
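# A NumPy rendition of the same spherical interpolation, kept outside the class
# for reference (a standalone sketch; the name `slerp_np` is an assumption):
def slerp_np(x0: np.ndarray, x1: np.ndarray, alpha: float) -> np.ndarray:
    theta = np.arccos(np.dot(x0.ravel(), x1.ravel()) / (np.linalg.norm(x0) * np.linalg.norm(x1)))
    return (np.sin((1 - alpha) * theta) * x0 + np.sin(alpha * theta) * x1) / np.sin(theta)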
| 325 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image, label):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0  # background
        array[array > 0] = 1  # pixels matching the label
        return Image.fromarray((array * 255).astype(np.uint8))
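# Standalone illustration of the decode thresholding on a dummy logit map
# (a sketch with made-up values; not tied to the tool above):
#
# logits = np.array([[-1.2, 0.3], [2.0, -0.5]], dtype=np.float32)
# mask = np.where(logits > 0, 1, 0).astype(np.uint8)
# Image.fromarray(mask * 255).show()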
| 259 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Plain linear scan, used once the search window is smaller than `precision`."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)

    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 325 | 0 |
"""simple docstring"""
from __future__ import annotations
def binary_search(a_list: list[int], item: int) -> bool:
    if len(a_list) == 0:
        return False
    midpoint = len(a_list) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search(a_list[:midpoint], item)
    else:
        return binary_search(a_list[midpoint + 1 :], item)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    sequence = [int(item.strip()) for item in user_input.split(",")]
    target = int(input("Enter the number to be found in the list:\n").strip())
    not_str = "" if binary_search(sequence, target) else "not "
    print(f"{target} was {not_str}found in {sequence}")
| 332 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE : Dict ) -> List[str]:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class A__ ( nn.Module ):
def __init__( self : Any , _UpperCAmelCase : nn.Module , _UpperCAmelCase : int ) -> Optional[int]:
"""simple docstring"""
super().__init__()
__lowercase = module
__lowercase = nn.Sequential(
nn.Linear(module.in_features , _UpperCAmelCase , bias=_UpperCAmelCase ) , nn.Linear(_UpperCAmelCase , module.out_features , bias=_UpperCAmelCase ) , )
__lowercase = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=_UpperCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def a__ ( self : str , _UpperCAmelCase : List[str] , *_UpperCAmelCase : List[Any] , **_UpperCAmelCase : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.module(_UpperCAmelCase , *_UpperCAmelCase , **_UpperCAmelCase ) + self.adapter(_UpperCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
# We keep the constants inside the init function and model loading inside setUp function
# We need to test on relatively large models (aka >1b parameters otherwise the quantiztion may not work as expected)
# Therefore here we use only bloom-1b3 to test our module
lowerCAmelCase__ : int = "bigscience/bloom-1b7"
# Constant values
lowerCAmelCase__ : Any = 2.109659552692574
lowerCAmelCase__ : str = "Hello my name is"
lowerCAmelCase__ : Any = set()
EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I" )
EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n" )
EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University" )
lowerCAmelCase__ : List[Any] = 10
def a__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__lowercase = AutoTokenizer.from_pretrained(self.model_name )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# Models and tokenizer
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map='auto' )
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : Any ) -> Optional[Any]:
"""simple docstring"""
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : str ) -> int:
"""simple docstring"""
__lowercase = self.model_abit.config
self.assertTrue(hasattr(_UpperCAmelCase , 'quantization_config' ) )
__lowercase = config.to_dict()
__lowercase = config.to_diff_dict()
__lowercase = config.to_json_string()
def a__ ( self : Dict ) -> Tuple:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
__lowercase = self.model_fpaa.get_memory_footprint()
__lowercase = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
__lowercase = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(_UpperCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def a__ ( self : List[str] ) -> str:
"""simple docstring"""
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_abit.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
__lowercase = True
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = model_abit_from_config.generate(
input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
def a__ ( self : str ) -> List[str]:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(_UpperCAmelCase )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
__lowercase = BitsAndBytesConfig()
with self.assertRaises(_UpperCAmelCase ):
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=_UpperCAmelCase , load_in_abit=_UpperCAmelCase , device_map='auto' , bnb_abit_quant_type='nf4' , )
def a__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
with self.assertRaises(_UpperCAmelCase ):
# Tries with `str`
self.model_abit.to('cpu' )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device('cuda:0' ) )
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(_UpperCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
__lowercase = self.model_fpaa.to(torch.floataa )
__lowercase = self.model_fpaa.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
__lowercase = self.model_fpaa.to('cpu' )
# Check this does not throw an error
__lowercase = self.model_fpaa.half()
# Check this does not throw an error
__lowercase = self.model_fpaa.float()
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = AutoModelForSeqaSeqLM.from_pretrained('t5-small' , load_in_abit=_UpperCAmelCase , device_map='auto' )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
@classmethod
def a__ ( cls : int ) -> Tuple:
"""simple docstring"""
__lowercase = 't5-small'
__lowercase = 'google/flan-t5-small' # flan-t5 uses dense-act instead of dense-relu-dense
__lowercase = AutoTokenizer.from_pretrained(cls.model_name )
__lowercase = 'Translate in German: Hello, my dog is cute'
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : int ) -> int:
"""simple docstring"""
from transformers import TaForConditionalGeneration
__lowercase = TaForConditionalGeneration._keep_in_fpaa_modules
__lowercase = None
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
__lowercase = modules
def a__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
# test with `flan-t5-small`
__lowercase = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' ).to(0 )
__lowercase = model.generate(**_UpperCAmelCase )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
super().setUp()
# model_name
__lowercase = 'bigscience/bloom-560m'
__lowercase = 't5-small'
# Different types of model
__lowercase = AutoModel.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Sequence classification model
__lowercase = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# CausalLM model
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
# Seq2seq model
__lowercase = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=_UpperCAmelCase , device_map='auto' )
def a__ ( self : int ) -> List[str]:
"""simple docstring"""
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> str:
"""simple docstring"""
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : str ) -> str:
"""simple docstring"""
super().setUp()
def a__ ( self : Dict ) -> Any:
"""simple docstring"""
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def a__ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = pipeline(
'text-generation' , model=self.model_name , model_kwargs={'device_map': 'auto', 'load_in_4bit': True, 'torch_dtype': torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
__lowercase = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]['generated_text'] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class A__ ( lowerCAmelCase__ ):
def a__ ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
super().setUp()
def a__ ( self : List[Any] ) -> int:
"""simple docstring"""
__lowercase = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=_UpperCAmelCase , device_map='balanced' )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
__lowercase = self.tokenizer(self.input_text , return_tensors='pt' )
# Second real batch
__lowercase = model_parallel.generate(input_ids=encoded_input['input_ids'].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=_UpperCAmelCase ) , self.EXPECTED_OUTPUTS )
class A__ ( lowerCAmelCase__ ):
def a__ ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = 'facebook/opt-350m'
super().setUp()
def a__ ( self : Dict ) -> List[str]:
"""simple docstring"""
if version.parse(importlib.metadata.version('bitsandbytes' ) ) < version.parse('0.37.0' ):
return
# Step 1: freeze all parameters
__lowercase = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=_UpperCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
__lowercase = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
__lowercase = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(_UpperCAmelCase ) ):
__lowercase = LoRALayer(module.q_proj , rank=16 )
__lowercase = LoRALayer(module.k_proj , rank=16 )
__lowercase = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
__lowercase = self.tokenizer('Test batch ' , return_tensors='pt' ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
__lowercase = model.forward(**_UpperCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(_UpperCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class A__ ( lowerCAmelCase__ ):
lowerCAmelCase__ : Any = "gpt2-xl"
lowerCAmelCase__ : str = 3.3191854854152187
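# For reference, the minimal user-facing pattern these tests exercise
# (a sketch; needs a CUDA GPU and a recent bitsandbytes, so it is left commented out):
# from transformers import AutoModelForCausalLM, BitsAndBytesConfig
#
# quantization_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)
# model_4bit = AutoModelForCausalLM.from_pretrained(
#     "bigscience/bloom-1b7", quantization_config=quantization_config, device_map="auto"
# )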
| 325 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class MraConfig(PretrainedConfig):
    model_type = "mra"

    def __init__(
        self,
        vocab_size=50265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        position_embedding_type="absolute",
        block_per_row=4,
        approx_mode="full",
        initial_prior_first_n_blocks=0,
        initial_prior_diagonal_n_blocks=0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.block_per_row = block_per_row
        self.approx_mode = approx_mode
        self.initial_prior_first_n_blocks = initial_prior_first_n_blocks
        self.initial_prior_diagonal_n_blocks = initial_prior_diagonal_n_blocks
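# Instantiation sketch (this module uses relative imports, so try it through the
# installed package instead; values mirror the defaults above):
# from transformers import MraConfig
#
# config = MraConfig(approx_mode="full", block_per_row=4)
# assert config.model_type == "mra"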
| 158 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
def __init__( self : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any]=13 , _UpperCAmelCase : List[str]=7 , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : str=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : Optional[Any]=99 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : List[str]=2 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : Optional[int]=37 , _UpperCAmelCase : Union[str, Any]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Dict=0.1 , _UpperCAmelCase : str=5_12 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Optional[int]=2 , _UpperCAmelCase : List[str]=0.02 , _UpperCAmelCase : Optional[int]=3 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : List[Any]=None , ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = parent
__lowercase = 13
__lowercase = 7
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = True
__lowercase = 99
__lowercase = 3_84
__lowercase = 2
__lowercase = 4
__lowercase = 37
__lowercase = 'gelu'
__lowercase = 0.1
__lowercase = 0.1
__lowercase = 5_12
__lowercase = 16
__lowercase = 2
__lowercase = 0.02
__lowercase = 3
__lowercase = 4
__lowercase = 1_28
__lowercase = 2
__lowercase = 9
__lowercase = 1
__lowercase = None
def a__ ( self : Dict ) -> List[Any]:
"""simple docstring"""
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase = None
if self.use_input_mask:
__lowercase = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase = None
if self.use_token_type_ids:
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase = None
__lowercase = None
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase = ids_tensor([self.batch_size] , self.num_choices )
__lowercase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
__lowercase = TFConvBertModel(config=_UpperCAmelCase )
__lowercase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowercase = [input_ids, input_mask]
__lowercase = model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] ) -> str:
"""simple docstring"""
__lowercase = TFConvBertForMaskedLM(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self : str , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : str ) -> Dict:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFConvBertForSequenceClassification(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def a__ ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.num_choices
__lowercase = TFConvBertForMultipleChoice(config=_UpperCAmelCase )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = tf.tile(tf.expand_dims(_UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
__lowercase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a__ ( self : Dict , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] ) -> int:
"""simple docstring"""
__lowercase = self.num_labels
__lowercase = TFConvBertForTokenClassification(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
__lowercase = TFConvBertForQuestionAnswering(config=_UpperCAmelCase )
__lowercase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowercase = model(_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a__ ( self : int ) -> Optional[int]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class A__ ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
lowerCAmelCase__ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ : List[str] = (
{
"feature-extraction": TFConvBertModel,
"fill-mask": TFConvBertForMaskedLM,
"question-answering": TFConvBertForQuestionAnswering,
"text-classification": TFConvBertForSequenceClassification,
"token-classification": TFConvBertForTokenClassification,
"zero-shot": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : int = False
lowerCAmelCase__ : List[str] = False
def a__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__lowercase = TFConvBertModelTester(self )
__lowercase = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def a__ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def a__ ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def a__ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> int:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCAmelCase )
def a__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
def a__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def a__ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def a__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = True
if hasattr(_UpperCAmelCase , 'use_cache' ):
__lowercase = True
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
for model_class in self.all_model_classes:
__lowercase = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase )
__lowercase = model_class(_UpperCAmelCase )
__lowercase = len(model(_UpperCAmelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_UpperCAmelCase , saved_model=_UpperCAmelCase )
__lowercase = os.path.join(_UpperCAmelCase , 'saved_model' , '1' )
__lowercase = tf.keras.models.load_model(_UpperCAmelCase )
__lowercase = model(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = outputs['encoder_hidden_states']
__lowercase = outputs['encoder_attentions']
else:
__lowercase = outputs['hidden_states']
__lowercase = outputs['attentions']
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
__lowercase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_UpperCAmelCase ) , _UpperCAmelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def a__ ( self : List[str] ) -> Dict:
"""simple docstring"""
__lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_UpperCAmelCase )
def a__ ( self : Tuple ) -> Tuple:
"""simple docstring"""
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
__lowercase = True
__lowercase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
__lowercase = getattr(self.model_tester , 'key_length' , _UpperCAmelCase )
def check_decoder_attentions_output(_UpperCAmelCase : int ):
__lowercase = len(_UpperCAmelCase )
self.assertEqual(out_len % 2 , 0 )
__lowercase = outputs.decoder_attentions
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_UpperCAmelCase : Union[str, Any] ):
__lowercase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_UpperCAmelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowercase = True
__lowercase = False
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
__lowercase = len(_UpperCAmelCase )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
if self.is_encoder_decoder:
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_decoder_attentions_output(_UpperCAmelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
# Check attention is always last and order is fine
__lowercase = True
__lowercase = True
__lowercase = model_class(_UpperCAmelCase )
__lowercase = model(self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_UpperCAmelCase ) )
self.assertEqual(model.config.output_hidden_states , _UpperCAmelCase )
check_encoder_attentions_output(_UpperCAmelCase )
@require_tf
class A__ ( unittest.TestCase ):
@slow
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowercase = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowercase = model(_UpperCAmelCase )[0]
__lowercase = [1, 6, 7_68]
self.assertEqual(output.shape , _UpperCAmelCase )
__lowercase = tf.constant(
[
[
[-0.03_475_493, -0.4_686_034, -0.30_638_832],
[0.22_637_248, -0.26_988_646, -0.7_423_424],
[0.10_324_868, -0.45_013_508, -0.58_280_784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _UpperCAmelCase , atol=1e-4 )
| 325 | 0 |
"""simple docstring"""
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """Circular convolution of two 1-D signals built from a rotation matrix."""

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)
        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
if __name__ == "__main__":
doctest.testmod()
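    # Cross-check via the FFT: circular convolution is pointwise multiplication
    # in the frequency domain (a sketch using the example signals from __init__):
    a = np.array([2, 1, 2, -1], dtype=float)
    b = np.array([1, 2, 3, 4], dtype=float)
    fft_result = np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))
    print([round(x, 2) for x in fft_result])  # expected: [10.0, 10.0, 6.0, 14.0]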
| 17 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("""ignore""", category=UserWarning, module="""torch.optim.lr_scheduler""")
class AcceleratedScheduler:
    """Wraps a learning rate scheduler so it only steps when its optimizer(s) actually took a step."""

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    # Passthrough to the wrapped scheduler's API
    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
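# Illustrative wiring, mirroring what `accelerator.prepare` produces
# (a sketch; assumes torch is installed, and only constructs the wrapper):
# import torch
#
# model = torch.nn.Linear(2, 2)
# optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
# base_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
# wrapped = AcceleratedScheduler(base_scheduler, optimizers=optimizer)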
| 325 | 0 |
def combination_util(arr, n, r, index, data, i):
    # Current combination is ready to be printed
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    # A temporary array to store all combination one by one
    data = [0] * r
    # Print all combination using temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
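    # Cross-check against the standard library: C(5, 3) = 10 combinations
    # (order of emission differs; only the count is compared here):
    from itertools import combinations

    assert len(list(combinations(arr, 3))) == 10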
| 195 |
import collections
import importlib.util
import os
import re
from pathlib import Path
SCREAMING_SNAKE_CASE__ = """src/transformers"""
# Matches is_xxx_available()
SCREAMING_SNAKE_CASE__ = re.compile(r"""is\_([a-z_]*)_available()""")
# Catches a one-line _import_struct = {xxx}
SCREAMING_SNAKE_CASE__ = re.compile(r"""^_import_structure\s+=\s+\{([^\}]+)\}""")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+\"\S*\":\s+\[([^\]]*)\]""")
# Catches a line if not is_foo_available
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*if\s+not\s+is\_[a-z_]*\_available\(\)""")
# Catches a line _import_struct["bla"].append("foo")
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)""")
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]""")
# Catches a line with an object between quotes and a comma: "MyModel",
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\"([^\"]+)\",""")
# Catches a line with objects between brackets only: ["foo", "bar"],
SCREAMING_SNAKE_CASE__ = re.compile("""^\s+\[([^\]]+)\]""")
# Catches a line with from foo import bar, bla, boo
SCREAMING_SNAKE_CASE__ = re.compile(r"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
# Catches a line with try:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*try:""")
# Catches a line with else:
SCREAMING_SNAKE_CASE__ = re.compile(r"""^\s*else:""")
def find_backend(line):
    """Find one (or multiple) backend(s) in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init_file and parse (per backend) the `_import_structure` objects defined and the `TYPE_CHECKING` objects
    defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()
    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 325 | 0 |
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_ckpt' , type=str , default='microsoft/unixcoder-base-nine' )
    parser.add_argument('--num_epochs' , type=int , default=5 )
    parser.add_argument('--batch_size' , type=int , default=6 )
    parser.add_argument('--gradient_accumulation_steps' , type=int , default=1 )
    parser.add_argument('--freeze' , type=bool , default=True )
    parser.add_argument('--learning_rate' , type=float , default=5e-4 )
    parser.add_argument('--seed' , type=int , default=0 )
    parser.add_argument('--lr_scheduler_type' , type=str , default='cosine' )
    parser.add_argument('--num_warmup_steps' , type=int , default=10 )
    parser.add_argument('--weight_decay' , type=float , default=0.01 )
    parser.add_argument('--output_dir' , type=str , default='./results' )
    return parser.parse_args()
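# usage sketch (hypothetical invocation; get_args() reads sys.argv):
#   python train_complexity_predictor.py --model_ckpt microsoft/unixcoder-base-nine \
#       --num_epochs 3 --batch_size 8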
metric = load('''accuracy''')
def compute_metrics(eval_pred ):
    predictions , labels = eval_pred
    predictions = np.argmax(predictions , axis=1 )
    return metric.compute(predictions=predictions , references=labels )
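# quick check with toy logits (two samples, three classes; hypothetical values):
#   compute_metrics((np.array([[0.1, 0.8, 0.1], [0.9, 0.05, 0.05]]), np.array([1, 0])))
#   -> {'accuracy': 1.0}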
class CustomCallback(TrainerCallback ):
    def __init__(self , trainer ):
        super().__init__()
        self._trainer = trainer
    def on_epoch_end(self , args , state , control , **kwargs ):
        if control.should_evaluate:
            control_copy = deepcopy(control )
            self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' )
            return control_copy
def main():
    args = get_args()
    set_seed(args.seed )
    dataset = load_dataset('codeparrot/codecomplex' , split='train' )
    train_test = dataset.train_test_split(test_size=0.2 )
    test_validation = train_test['test'].train_test_split(test_size=0.5 )
    train_test_validation = DatasetDict(
        {
            'train': train_test['train'],
            'test': test_validation['train'],
            'valid': test_validation['test'],
        } )
    print('Loading tokenizer and model' )
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt )
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
    model.config.pad_token_id = model.config.eos_token_id
    if args.freeze:
        for param in model.roberta.parameters():
            param.requires_grad = False
    labels = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) )
    def tokenize(example ):
        inputs = tokenizer(example['src'] , truncation=True , max_length=1_024 )
        label = labels.str2int(example['complexity'] )
        return {
            "input_ids": inputs["input_ids"],
            "attention_mask": inputs["attention_mask"],
            "label": label,
        }
    tokenized_datasets = train_test_validation.map(
        tokenize , batched=True , remove_columns=train_test_validation['train'].column_names , )
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer )
    training_args = TrainingArguments(
        output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )
    trainer = Trainer(
        model=model , args=training_args , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=tokenizer , data_collator=data_collator , compute_metrics=compute_metrics , )
    print('Training...' )
    trainer.add_callback(CustomCallback(trainer ) )
    trainer.train()
if __name__ == "__main__":
    main()
| 282 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter ):
    @staticmethod
    def _should_log(main_process_only ):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)
    def log(self , level , msg , *args , **kwargs ):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
        main_process_only = kwargs.pop('main_process_only' , True )
        in_order = kwargs.pop('in_order' , False )
        if self.isEnabledFor(level ):
            if self._should_log(main_process_only ):
                msg , kwargs = self.process(msg , kwargs )
                self.logger.log(level , msg , *args , **kwargs )
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes ):
                    if i == state.process_index:
                        msg , kwargs = self.process(msg , kwargs )
                        self.logger.log(level , msg , *args , **kwargs )
                    state.wait_for_everyone()
def get_logger(name : str , log_level : str = None ):
    if log_level is None:
        log_level = os.environ.get('ACCELERATE_LOG_LEVEL' , None )
    logger = logging.getLogger(name )
    if log_level is not None:
        logger.setLevel(log_level.upper() )
        logger.root.setLevel(log_level.upper() )
    return MultiProcessAdapter(logger , {} )
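# usage sketch (assumes `Accelerator()` or `PartialState()` was initialized first):
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once, on the main process", main_process_only=True)
#   logger.debug("printed by every rank, in order", main_process_only=False, in_order=True)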
| 325 | 0 |
def binomial_coefficient(n : int , r : int ) -> int:
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
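# sanity checks: C(10, 5) == 252 and Pascal's rule C(n, r) = C(n-1, r-1) + C(n-1, r)
assert binomial_coefficient(10, 5) == 252
assert binomial_coefficient(10, 5) == binomial_coefficient(9, 4) + binomial_coefficient(9, 5)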
| 340 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name ):
    depths = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if 'large' in model_name or 'huge' in model_name else False
    use_post_layernorm = True if 'large' in model_name or 'huge' in model_name else False
    use_layerscale = True if 'large' in model_name or 'huge' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = 'huggingface/label-files'
    if "large" in model_name or "huge" in model_name:
        filename = 'imagenet-22k-id2label.json'
    else:
        filename = 'imagenet-1k-id2label.json'
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
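# quick check (model names follow the conversion table below): "focalnet-tiny"
# yields embed_dim=96, depths=[2, 2, 6, 2], focal_levels=[2, 2, 2, 2] and
# the 1000 ImageNet-1k labels.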
def rename_key(name ):
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        name = 'encoder.' + name
    if "encoder.layers" in name:
        name = name.replace('encoder.layers' , 'encoder.stages' )
    if "downsample.proj" in name:
        name = name.replace('downsample.proj' , 'downsample.projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('modulation.f' , 'modulation.projection_in' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('modulation.h' , 'modulation.projection_context' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('modulation.proj' , 'modulation.projection_out' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head' , 'classifier' )
    else:
        name = 'focalnet.' + name
    return name
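# example mapping for a hypothetical checkpoint key:
#   rename_key("layers.0.blocks.1.modulation.f.weight")
#   -> "focalnet.encoder.stages.0.layers.1.modulation.projection_in.weight"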
def convert_focalnet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub=False ):
    # fmt: off
    model_name_to_url = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
    url = model_name_to_url[model_name]
    print('Checkpoint URL: ' , url )
    state_dict = torch.hub.load_state_dict_from_url(url , map_location='cpu' )['model']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
    # verify conversion
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    processor = BitImageProcessor(
        do_resize=True , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors='pt' )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1e-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print('Predicted class:' , model.config.id2label[predicted_class_idx] )
    print('First values of logits:' , outputs.logits[0, :3] )
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
    print('Looks ok!' )
    if pytorch_dump_folder_path is not None:
        print(F"""Saving model and processor of {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F"""Pushing model and processor of {model_name} to the hub...""" )
        model.push_to_hub(F"""{model_name}""" )
        processor.push_to_hub(F"""{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 325 | 0 |
from __future__ import annotations
def make_matrix(row_size : int = 4 ) -> list[list[int]]:
    row_size = abs(row_size ) or 4
    return [[1 + x + y * row_size for x in range(row_size )] for y in range(row_size )]
def rotate_90(matrix ):
    return reverse_row(transpose(matrix ) )
    # OR.. transpose(reverse_column(matrix))
def rotate_180(matrix ):
    return reverse_row(reverse_column(matrix ) )
    # OR.. reverse_column(reverse_row(matrix))
def rotate_270(matrix ):
    return reverse_column(transpose(matrix ) )
    # OR.. transpose(reverse_row(matrix))
def transpose(matrix ):
    matrix = [list(x ) for x in zip(*matrix )]
    return matrix
def reverse_row(matrix ):
    matrix = matrix[::-1]
    return matrix
def reverse_column(matrix ):
    matrix = [x[::-1] for x in matrix]
    return matrix
def print_matrix(matrix ):
    for i in matrix:
        print(*i )
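# consistency sketch: two quarter turns equal one half turn, e.g.
#   m = make_matrix(3)
#   assert rotate_90(rotate_90(m)) == rotate_180(m)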
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 90 counterclockwise:\n""")
    print_matrix(rotate_90(matrix))
_SCREAMING_SNAKE_CASE = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 180:\n""")
    print_matrix(rotate_180(matrix))
_SCREAMING_SNAKE_CASE = make_matrix()
print("""\norigin:\n""")
print_matrix(matrix)
print("""\nrotate 270 counterclockwise:\n""")
    print_matrix(rotate_270(matrix))
| 327 |
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE__ = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
logger = logging.get_logger(__name__)
class MaskaFormerConfig(PretrainedConfig ):
    model_type = "mask2former"
    backbones_supported = ["swin"]
    attribute_map = {"hidden_size": "hidden_dim"}
    def __init__(self , backbone_config : Optional[Dict] = None , feature_size : int = 256 , mask_feature_size : int = 256 , hidden_dim : int = 256 , encoder_feedforward_dim : int = 1024 , activation_function : str = "relu" , encoder_layers : int = 6 , decoder_layers : int = 10 , num_attention_heads : int = 8 , dropout : float = 0.0 , dim_feedforward : int = 2048 , pre_norm : bool = False , enforce_input_projection : bool = False , common_stride : int = 4 , ignore_value : int = 255 , num_queries : int = 100 , no_object_weight : float = 0.1 , class_weight : float = 2.0 , mask_weight : float = 5.0 , dice_weight : float = 5.0 , train_num_points : int = 12544 , oversample_ratio : float = 3.0 , importance_sample_ratio : float = 0.75 , init_std : float = 0.02 , init_xavier_std : float = 1.0 , use_auxiliary_loss : bool = True , feature_strides : List[int] = [4, 8, 16, 32] , output_auxiliary_logits : bool = None , **kwargs , ):
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.' )
            backbone_config = CONFIG_MAPPING['swin'](
                image_size=224 , in_channels=3 , patch_size=4 , embed_dim=96 , depths=[2, 2, 18, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=False , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
        if isinstance(backbone_config , dict ):
            backbone_model_type = backbone_config.pop('model_type' )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {",".join(self.backbones_supported )}""" )
        self.backbone_config = backbone_config
        self.feature_size = feature_size
        self.mask_feature_size = mask_feature_size
        self.hidden_dim = hidden_dim
        self.encoder_feedforward_dim = encoder_feedforward_dim
        self.activation_function = activation_function
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.num_attention_heads = num_attention_heads
        self.dropout = dropout
        self.dim_feedforward = dim_feedforward
        self.pre_norm = pre_norm
        self.enforce_input_projection = enforce_input_projection
        self.common_stride = common_stride
        self.ignore_value = ignore_value
        self.num_queries = num_queries
        self.no_object_weight = no_object_weight
        self.class_weight = class_weight
        self.mask_weight = mask_weight
        self.dice_weight = dice_weight
        self.train_num_points = train_num_points
        self.oversample_ratio = oversample_ratio
        self.importance_sample_ratio = importance_sample_ratio
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.use_auxiliary_loss = use_auxiliary_loss
        self.feature_strides = feature_strides
        self.output_auxiliary_logits = output_auxiliary_logits
        self.num_hidden_layers = decoder_layers
        super().__init__(**kwargs )
    @classmethod
    def from_backbone_config(cls , backbone_config : PretrainedConfig , **kwargs ):
        return cls(
            backbone_config=backbone_config , **kwargs , )
    def to_dict(self ) -> Dict[str, any]:
        output = copy.deepcopy(self.__dict__ )
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
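# usage sketch (this configuration ships in transformers as Mask2FormerConfig):
#   from transformers import Mask2FormerConfig
#   config = Mask2FormerConfig()
#   assert config.num_queries == 100 and config.hidden_dim == 256
#   restored = Mask2FormerConfig.from_dict(config.to_dict())  # round-trips cleanly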
| 325 | 0 |
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig ):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fpaa = True
    ecz_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "False",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
    fail_training_script_args = [
        "--model_name_or_path",
        "bert",
        "--do_train",
        "--do_test",
        "False",
        "--do_predict",
        "--epochs",
        "3",
        "--learning_rate",
        "5e-5",
        "--max_steps",
        "50.5",
    ]
class SageMakerLaunch(unittest.TestCase ):
    def test_args_convert(self ):
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args['model_name_or_path'] , str )
        assert isinstance(converted_args['do_train'] , bool )
        assert isinstance(converted_args['epochs'] , int )
        assert isinstance(converted_args['learning_rate'] , float )
        assert isinstance(converted_args['max_steps'] , float )
        with pytest.raises(ValueError ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
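# sketch of the conversion result implied by the assertions above (values
# inferred from the test, not taken from the library source):
#   _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
#   -> {'model_name_or_path': 'bert', 'do_train': False, 'epochs': 3,
#       'learning_rate': 5e-05, 'max_steps': 50.5}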
| 279 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast(tokenizer_name , checkpoint_name , dump_path , force_download ):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F"""Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.""" )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
    logger.info(F"""Loading tokenizer classes: {tokenizer_names}""" )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F"""For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}""" )
        for checkpoint in checkpoint_names:
            logger.info(F"""Loading {tokenizer_class.__class__.__name__} {checkpoint}""" )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F"""Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}""" )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F"""=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}""" )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F"""=> File names {file_names}""" )
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(F"""=> removing {file_name}""" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
| 325 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""],
"""tokenization_roberta""": ["""RobertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["""RobertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"""ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaForCausalLM""",
"""RobertaForMaskedLM""",
"""RobertaForMultipleChoice""",
"""RobertaForQuestionAnswering""",
"""RobertaForSequenceClassification""",
"""RobertaForTokenClassification""",
"""RobertaModel""",
"""RobertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"""TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaForCausalLM""",
"""TFRobertaForMaskedLM""",
"""TFRobertaForMultipleChoice""",
"""TFRobertaForQuestionAnswering""",
"""TFRobertaForSequenceClassification""",
"""TFRobertaForTokenClassification""",
"""TFRobertaMainLayer""",
"""TFRobertaModel""",
"""TFRobertaPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"""FlaxRobertaForCausalLM""",
"""FlaxRobertaForMaskedLM""",
"""FlaxRobertaForMultipleChoice""",
"""FlaxRobertaForQuestionAnswering""",
"""FlaxRobertaForSequenceClassification""",
"""FlaxRobertaForTokenClassification""",
"""FlaxRobertaModel""",
"""FlaxRobertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
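# With the registration above, importing this package is cheap (a sketch of the
# lazy-import behavior): e.g. accessing `transformers.models.roberta.RobertaModel`
# only triggers the `modeling_roberta` import on first attribute access.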
| 289 |
from math import isqrt, log2
def calculate_prime_numbers(max_number : int ) -> list[int]:
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution(base : int = 800800 , degree : int = 800800 ) -> int:
    upper_bound = degree * log2(base )
    max_prime = int(upper_bound )
    prime_numbers = calculate_prime_numbers(max_prime )
    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left] )
            + prime_numbers[left] * log2(prime_numbers[right] )
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1
    return hybrid_integers_count
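# brute-force cross-check of the two-pointer count on a tiny instance
# (assumption: base = degree = 8 is small enough to enumerate all prime pairs)
from itertools import combinations

_bound = 8 * log2(8)
_naive = sum(
    1
    for p, q in combinations(calculate_prime_numbers(int(_bound)), 2)
    if q * log2(p) + p * log2(q) <= _bound
)
assert _naive == solution(8, 8) == 7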
if __name__ == "__main__":
print(F'''{solution() = }''')
| 325 | 0 |
def _print_dist(dist , v ):
    print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float('''inf''' ):
                print(int(dist[i][j] ) , end='''\t''' )
            else:
                print('''INF''' , end='''\t''' )
        print()
def floyd_warshall(graph , v ):
    dist = [[float('''inf''' ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float('''inf''' )
                    and dist[k][j] != float('''inf''' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
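# worked example (sketch): with INF = float('inf') and
#   graph = [[0, 2, INF], [INF, 0, 3], [1, INF, 0]]
# floyd_warshall(graph, 3) relaxes the path 0 -> 1 -> 2, so dist[0][2] == 5.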
if __name__ == "__main__":
    v = int(input('''Enter number of vertices: '''))
    e = int(input('''Enter number of edges: '''))
    graph = [[float('''inf''') for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('''\nEdge ''', i + 1)
        src = int(input('''Enter source:'''))
        dst = int(input('''Enter destination:'''))
        weight = float(input('''Enter weight:'''))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 313 |
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self ):
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP )
        vocab = ['<s>', '<pad>', '</s>', '<unk>']
        vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(spm_model ) )]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        save_dir = Path(self.tmpdirname )
        save_json(vocab_tokens , save_dir / VOCAB_FILES_NAMES['vocab_file'] )
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP , save_dir / VOCAB_FILES_NAMES['spm_file'] )
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id(self ):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab(self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , 'j' )
        self.assertEqual(len(vocab_keys ) , 10_01 )
    def test_vocab_size(self ):
        self.assertEqual(self.get_tokenizer().vocab_size , 10_01 )
    def test_full_tokenizer(self ):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
        tokens = tokenizer.tokenize('This is a test' )
        self.assertListEqual(tokens , ['▁This', '▁is', '▁a', '▁t', 'est'] )
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens ) , [2_89, 50, 14, 1_74, 3_86] , )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
        self.assertListEqual(
            tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [12, 25, 88, 59, 28, 23, 11, 4, 6_06, 3_51, 3_51, 3_51, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def a__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {'input_ids': [[37_91, 7_97, 31, 11, 64, 7_97, 31, 24_29, 4_33, 12, 11_76, 12, 20, 7_86, 9_15, 1_42, 24_13, 2_40, 37, 32_38, 7_97, 31, 11, 35, 93, 9_15, 1_42, 24_13, 2_40, 37, 55_40, 5_67, 12_76, 93, 37, 6_10, 40, 62, 4_55, 6_57, 10_42, 1_23, 7_80, 1_77, 37, 3_09, 2_41, 12_98, 5_14, 20, 2_92, 27_37, 1_14, 24_69, 2_41, 85, 64, 3_02, 5_48, 5_28, 4_23, 4, 5_09, 4_06, 4_23, 37, 6_01, 4, 7_77, 3_02, 5_48, 5_28, 4_23, 2_84, 4, 33_88, 5_11, 4_59, 4, 35_55, 40, 3_21, 3_02, 7_05, 4, 33_88, 5_11, 5_83, 3_26, 5, 5, 5, 62, 33_10, 5_60, 1_77, 26_80, 2_17, 15_08, 32, 31, 8_53, 4_18, 64, 5_83, 5_11, 16_05, 62, 35, 93, 5_60, 1_77, 26_80, 2_17, 15_08, 15_21, 64, 5_83, 5_11, 5_19, 62, 20, 15_15, 7_64, 20, 1_49, 2_61, 56_25, 79_72, 20, 55_40, 5_67, 12_76, 93, 39_25, 16_75, 11, 15, 8_02, 79_72, 5_76, 2_17, 15_08, 11, 35, 93, 12_53, 24_41, 15, 2_89, 6_52, 31, 4_16, 3_21, 38_42, 1_15, 40, 9_11, 8, 4_76, 6_19, 4, 3_80, 1_42, 4_23, 3_35, 2_40, 35, 93, 2_64, 8, 11, 3_35, 5_69, 4_20, 1_63, 5, 2], [2_60, 5_48, 5_28, 4_23, 20, 4_51, 20, 26_81, 11_53, 34_34, 20, 55_40, 37, 5_67, 1_26, 12_53, 24_41, 33_76, 4_49, 2_10, 4_31, 15_63, 1_77, 7_67, 55_40, 11, 12_03, 4_72, 11, 29_53, 6_85, 2_85, 3_64, 7_06, 11_53, 20, 67_99, 20, 28_69, 20, 44_64, 1_26, 40, 24_29, 20, 10_40, 8_66, 26_64, 4_18, 20, 3_18, 20, 17_26, 1_86, 20, 2_65, 5_22, 35, 93, 21_91, 46_34, 20, 10_40, 12, 67_99, 15, 2_28, 23_56, 1_42, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [25_75, 26_66, 6_84, 15_82, 11_76, 12, 6_27, 1_49, 6_19, 20, 49_02, 5_63, 11, 20, 1_49, 2_61, 34_20, 23_56, 1_74, 1_42, 47_14, 1_31, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase , model_name='facebook/s2t-small-mustc-en-de-st' , revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad' , )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase ):
    checkpoint_name = "valhalla/s2t_mustc_multilinguial_medium"
    french_text = "C'est trop cool"
    spanish_text = "Esto es genial"
    @classmethod
    def setUpClass(cls ):
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
        return cls
    def check_language_codes(self ):
self.assertEqual(self.tokenizer.lang_code_to_id['pt'] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id['ru'] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id['it'] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id['de'] , 11 )
    def test_vocab_size(self ):
self.assertEqual(self.tokenizer.vocab_size , 1_00_00 )
    def test_tokenizer_decode_ignores_language_codes(self ):
        self.assertIn(ES_CODE , self.tokenizer.all_special_ids )
        generated_ids = [ES_CODE, 4, 16_01, 47, 76_47, 2]
        result = self.tokenizer.decode(generated_ids , skip_special_tokens=True )
        expected_spanish = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=True )
        self.assertEqual(result , expected_spanish )
        self.assertNotIn(self.tokenizer.eos_token , result )
    def test_tokenizer_adds_special_tokens(self ):
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text ).input_ids
        self.assertEqual(encoded[0] , FR_CODE )
        self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
    def test_tgt_lang_setter(self ):
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 325 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def get_focalnet_config(model_name ):
    depths = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
    use_conv_embed = True if '''large''' in model_name or '''huge''' in model_name else False
    use_post_layernorm = True if '''large''' in model_name or '''huge''' in model_name else False
    use_layerscale = True if '''large''' in model_name or '''huge''' in model_name else False
    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]
    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        if "lrf" in model_name:
            focal_levels = [3, 3, 3, 3]
        else:
            focal_levels = [2, 2, 2, 2]
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "large" in model_name:
        embed_dim = 192
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "huge" in model_name:
        embed_dim = 352
    # set label information
    repo_id = '''huggingface/label-files'''
    if "large" in model_name or "huge" in model_name:
        filename = '''imagenet-22k-id2label.json'''
    else:
        filename = '''imagenet-1k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type='''dataset''' ) , '''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = FocalNetConfig(
        embed_dim=embed_dim , depths=depths , focal_levels=focal_levels , focal_windows=focal_windows , use_conv_embed=use_conv_embed , id2label=id2label , label2id=label2id , use_post_layernorm=use_post_layernorm , use_layerscale=use_layerscale , )
    return config
def rename_key(name ):
    if "patch_embed.proj" in name:
        name = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "patch_embed.norm" in name:
        name = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
    if "layers" in name:
        name = '''encoder.''' + name
    if "encoder.layers" in name:
        name = name.replace('''encoder.layers''' , '''encoder.stages''' )
    if "downsample.proj" in name:
        name = name.replace('''downsample.proj''' , '''downsample.projection''' )
    if "blocks" in name:
        name = name.replace('''blocks''' , '''layers''' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('''modulation.f''' , '''modulation.projection_in''' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('''modulation.h''' , '''modulation.projection_context''' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
    if name == "norm.weight":
        name = '''layernorm.weight'''
    if name == "norm.bias":
        name = '''layernorm.bias'''
    if "head" in name:
        name = name.replace('''head''' , '''classifier''' )
    else:
        name = '''focalnet.''' + name
    return name
def convert_focalnet_checkpoint(model_name , pytorch_dump_folder_path , push_to_hub=False ):
    # fmt: off
    model_name_to_url = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
    url = model_name_to_url[model_name]
    print('''Checkpoint URL: ''' , url )
    state_dict = torch.hub.load_state_dict_from_url(url , map_location='''cpu''' )['''model''']
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    config = get_focalnet_config(model_name )
    model = FocalNetForImageClassification(config )
    model.eval()
    # load state dict
    model.load_state_dict(state_dict )
    # verify conversion
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    processor = BitImageProcessor(
        do_resize=True , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=True , crop_size=224 , do_normalize=True , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD , )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = processor(images=image , return_tensors='''pt''' )
    image_transforms = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    original_pixel_values = image_transforms(image ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , original_pixel_values , atol=1e-4 )
    outputs = model(**inputs )
    predicted_class_idx = outputs.logits.argmax(-1 ).item()
    print('''Predicted class:''' , model.config.id2label[predicted_class_idx] )
    print('''First values of logits:''' , outputs.logits[0, :3] )
    if model_name == "focalnet-tiny":
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        expected_slice = torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        expected_slice = torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        expected_slice = torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        expected_slice = torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        expected_slice = torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(F'''{model_name}''' )
        processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 259 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class LayoutLMvaConfig(PretrainedConfig ):
    model_type = "layoutlmv3"
    def __init__(self , vocab_size=50_265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-5 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_2d_position_embeddings=1024 , coordinate_size=128 , shape_size=128 , has_relative_attention_bias=True , rel_pos_bins=32 , max_rel_pos=128 , rel_2d_pos_bins=64 , max_rel_2d_pos=256 , has_spatial_attention_bias=True , text_embed=True , visual_embed=True , input_size=224 , num_channels=3 , patch_size=16 , classifier_dropout=None , **kwargs , ):
        super().__init__(
            vocab_size=vocab_size , hidden_size=hidden_size , num_hidden_layers=num_hidden_layers , num_attention_heads=num_attention_heads , intermediate_size=intermediate_size , hidden_act=hidden_act , hidden_dropout_prob=hidden_dropout_prob , attention_probs_dropout_prob=attention_probs_dropout_prob , max_position_embeddings=max_position_embeddings , type_vocab_size=type_vocab_size , initializer_range=initializer_range , layer_norm_eps=layer_norm_eps , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs , )
        self.max_2d_position_embeddings = max_2d_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_2d_pos_bins = rel_2d_pos_bins
        self.max_rel_2d_pos = max_rel_2d_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class LayoutLMvaOnnxConfig(OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.12' )
    @property
    def inputs(self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
    @property
    def atol_for_validation(self ) -> float:
        return 1e-5
    @property
    def default_onnx_opset(self ) -> int:
        return 12
    def generate_dummy_inputs(self , processor : "ProcessorMixin" , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , num_channels : int = 3 , image_width : int = 40 , image_height : int = 40 , ) -> Mapping[str, Any]:
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
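# Added sketch of how dummy inputs like the ones built above feed the ONNX
# export path. `model`, `processor`, and `onnx_config` are placeholders rather
# than definitions from this file, and `TensorType.PYTORCH` assumes the
# transformers TensorType enum.
#
#     import torch
#     from transformers.utils import TensorType
#
#     dummy_inputs = onnx_config.generate_dummy_inputs(processor, framework=TensorType.PYTORCH)
#     torch.onnx.export(
#         model, (dummy_inputs,), "layoutlmv3.onnx", input_names=list(onnx_config.inputs), opset_version=12
#     )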
| 325 | 0 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_lowercase : Union[str, Any] = 10
def lowercase__ ( snake_case_ :int , snake_case_ :int , snake_case_ :list[int] , snake_case_ :int ):
for i in range(snake_case_ , snake_case_ ):
if array[i] == target:
return i
return -1
def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search; ``array`` must be sorted in ascending order."""
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            # The remaining window is small enough for a plain linear scan.
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1
def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search; ``array`` must be sorted in ascending order."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase : Dict = input('Enter numbers separated by comma:\n').strip()
_lowercase : Optional[int] = [int(item.strip()) for item in user_input.split(',')]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_lowercase : List[str] = int(input('Enter the number to be found in the list:\n').strip())
_lowercase : Dict = ite_ternary_search(collection, target)
_lowercase : Union[str, Any] = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"""Iterative search: {target} found at positions: {resulta}""")
print(f"""Recursive search: {target} found at positions: {resulta}""")
else:
print('Not found')
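# Added quick check (doctest-style, for the functions defined above):
#
#     >>> data = [1, 3, 5, 7, 9, 11]
#     >>> ite_ternary_search(data, 7)
#     3
#     >>> rec_ternary_search(0, len(data) - 1, data, 99)
#     -1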
| 332 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "RegNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "facebook/regnet-y-040"
_EXPECTED_OUTPUT_SHAPE = [1, 1088, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "facebook/regnet-y-040"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class RegNetConvLayer(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=kernel_size // 2,
            groups=groups,
            bias=False,
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, hidden_state):
        hidden_state = self.convolution(hidden_state)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
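# Added illustration (not part of the original module): the layer above is a
# Conv2d -> BatchNorm2d -> activation pipeline. The channel counts and input
# shape below are assumptions for demonstration.
def _demo_conv_layer() -> "torch.Size":
    layer = RegNetConvLayer(3, 32, kernel_size=3, stride=2, activation="relu")
    # Stride 2 with "same" padding halves the spatial resolution.
    return layer(torch.randn(1, 3, 224, 224)).shape  # torch.Size([1, 32, 112, 112])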
class RegNetEmbeddings(nn.Module):
    """
    RegNet embeddings (stem): a single aggressive convolution applied to the pixel values.
    """

    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.embedder = RegNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=3, stride=2, activation=config.hidden_act
        )
        self.num_channels = config.num_channels

    def forward(self, pixel_values):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class RegNetShortCut(nn.Module):
    """
    RegNet shortcut, used to project the residual features to the correct size and, if needed,
    to downsample the input with `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class RegNetSELayer(nn.Module):
    """
    Squeeze-and-Excitation layer: rescales each channel by an attention weight computed
    from a globally pooled summary of the feature map.
    """

    def __init__(self, in_channels: int, reduced_channels: int):
        super().__init__()
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        self.attention = nn.Sequential(
            nn.Conv2d(in_channels, reduced_channels, kernel_size=1),
            nn.ReLU(),
            nn.Conv2d(reduced_channels, in_channels, kernel_size=1),
            nn.Sigmoid(),
        )

    def forward(self, hidden_state):
        # (b, c, h, w) -> (b, c, 1, 1)
        pooled = self.pooler(hidden_state)
        attention = self.attention(pooled)
        hidden_state = hidden_state * attention
        return hidden_state
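# Added illustration: squeeze-and-excitation leaves the shape untouched and only
# rescales channels by weights in (0, 1). The channel counts are assumptions.
def _demo_se_layer() -> "torch.Tensor":
    se = RegNetSELayer(in_channels=64, reduced_channels=16)
    hidden = torch.randn(2, 64, 56, 56)
    return se(hidden)  # same shape as the input, rescaled per channel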
class RegNetXLayer(nn.Module):
    """
    RegNet's layer composed of three convolutions, same as a ResNet bottleneck layer with reduction = 1.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
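# Added illustration: the residual pattern above in one expression,
# y = act(layer(x) + shortcut(x)), with an identity shortcut when shapes match.
def _demo_residual() -> "torch.Tensor":
    x = torch.randn(2, 32, 28, 28)
    f = nn.Conv2d(32, 32, kernel_size=3, padding=1)  # stand-in for the grouped bottleneck
    return nn.ReLU()(f(x) + x)  # in/out shapes match, so the shortcut is the identity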
class RegNetYLayer(nn.Module):
    """
    RegNet's Y layer: an X layer with Squeeze-and-Excitation.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            RegNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            RegNetConvLayer(in_channels, out_channels, kernel_size=1, activation=config.hidden_act),
            RegNetConvLayer(out_channels, out_channels, stride=stride, groups=groups, activation=config.hidden_act),
            RegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4))),
            RegNetConvLayer(out_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[config.hidden_act]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class RegNetStage(nn.Module):
    """
    A RegNet stage composed of stacked layers.
    """

    def __init__(
        self,
        config: RegNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = RegNetXLayer if config.layer_type == "x" else RegNetYLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride),
            *[layer(config, out_channels, out_channels) for _ in range(depth - 1)],
        )

    def forward(self, hidden_state):
        hidden_state = self.layers(hidden_state)
        return hidden_state
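# Added illustration: a stage stacks `depth` layers, and only the first one may
# stride (changing channels and resolution). The channel counts, input shape,
# and config values here are assumptions for demonstration.
def _demo_stage(config: RegNetConfig) -> "torch.Size":
    stage = RegNetStage(config, in_channels=32, out_channels=64, stride=2, depth=3)
    return stage(torch.randn(1, 32, 56, 56)).shape  # spatial dims halved by the first layer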
class RegNetEncoder(nn.Module):
    def __init__(self, config: RegNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            RegNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(RegNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
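# Added illustration: the optional hidden-state collection above snapshots the
# tensor before every stage plus once at the end, yielding num_stages + 1 entries.
def _demo_hidden_state_count(encoder: "RegNetEncoder", pixel_embeddings: Tensor) -> int:
    outputs = encoder(pixel_embeddings, output_hidden_states=True, return_dict=True)
    return len(outputs.hidden_states)  # == len(encoder.stages) + 1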
class RegNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, RegNetModel):
            module.gradient_checkpointing = value
SCREAMING_SNAKE_CASE__ = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE__ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top.",
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class RegNetModel(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = RegNetEmbeddings(config)
        self.encoder = RegNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
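# Added usage sketch (kept commented because it downloads pretrained weights):
# running the backbone on one image with the checkpoint named in the docstring
# constants above. The image path is a placeholder.
#
#     from PIL import Image
#     from transformers import AutoImageProcessor, RegNetModel
#
#     processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#     model = RegNetModel.from_pretrained("facebook/regnet-y-040")
#     inputs = processor(images=Image.open("cat.png"), return_tensors="pt")
#     outputs = model(**inputs)  # outputs.last_hidden_state: (1, 1088, 7, 7)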
@add_start_docstrings(
    """
    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    REGNET_START_DOCSTRING,
)
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class RegNetForImageClassification(RegNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.regnet = RegNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.regnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            # Infer the problem type from num_labels and the label dtype when unset.
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
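# Added illustration: the problem_type dispatch above. Integer labels with more
# than one class route to cross-entropy; the tensors here are assumptions.
def _demo_single_label_loss() -> "torch.Tensor":
    logits = torch.randn(4, 10)          # (batch_size, num_labels)
    labels = torch.randint(0, 10, (4,))  # dtype torch.long -> "single_label_classification"
    return CrossEntropyLoss()(logits.view(-1, 10), labels.view(-1))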
| 325 | 0 |