from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
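

# Illustrative usage (added for clarity; not part of the original module):
# instantiating ConvBertConfig with no arguments reproduces the
# YituTech/conv-bert-base defaults encoded in the signature above, e.g.
#
#     config = ConvBertConfig()
#     assert config.hidden_size == 768 and config.conv_kernel_size == 9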
from math import pi


def arc_length(angle: int, radius: int) -> float:
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
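
# Worked check (added for illustration): a 90-degree arc of a circle of
# radius 10 spans a quarter of the circumference, so the call above prints
# 2 * pi * 10 * (90 / 360) = 5 * pi ≈ 15.707963267948966.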
from decimal import Decimal, getcontext
from math import ceil, factorial


def pi(precision: int) -> str:
    # Computes pi to the requested number of digits with the Chudnovsky algorithm.
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]


if __name__ == "__main__":
    n = 50
    print(f"The first {n} digits of pi are: {pi(n)}")
from math import sqrt


def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False

    # All remaining primes are of the form 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number


if __name__ == "__main__":
    print(f"{solution() = }")
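
# Quick sanity check (illustrative, not in the original file): the first six
# primes are 2, 3, 5, 7, 11, 13, so solution(6) returns 13; the default
# solution() answers Project Euler problem 7 by finding the 10001st prime.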
from typing import List, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/time-series-transformer-tourism-monthly": (
        "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
    ),
    # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}


class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
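
# Illustrative usage (added; relies on the reconstructed defaults above):
#
#     config = TimeSeriesTransformerConfig(prediction_length=24)
#     assert config.context_length == 24  # falls back to prediction_length
#     assert config.feature_size == 1 * 7 + (0 + 0 + 0 + 0 + 1 * 2)  # = 9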
import unittest

from knapsack import greedy_knapsack as kp


class TestGreedyKnapsack(unittest.TestCase):
    # The test method names below are reconstructions; the original names were
    # lost, but each body pins down what is being tested.

    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(
            IndexError, "The length of profit and weight must be same."
        )


if __name__ == "__main__":
    unittest.main()
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import VivitImageProcessor


class VivitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        num_frames=10,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}

        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class VivitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = VivitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = VivitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL videos
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], Image.Image)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], np.ndarray)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        video_inputs = prepare_video_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for video in video_inputs:
            self.assertIsInstance(video, list)
            self.assertIsInstance(video[0], torch.Tensor)

        # Test not batched input
        encoded_videos = image_processing(video_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                1,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_videos = image_processing(video_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_videos.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_frames,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
    )


def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
import unittest

from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
from __future__ import annotations


def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop
    # condition of the recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq


if __name__ == "__main__":
    import doctest

    doctest.testmod()
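
# Example behaviour (illustrative): longest_subsequence([3, 1, 2]) skips the
# leading 3 and returns [1, 2], the longest non-decreasing run it can build.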
import gc
import random
import unittest

import torch

from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin


@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")
        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()
        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())
        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)


def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
from typing import List, Optional, Union

import numpy as np
import PIL.Image

from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    PILImageResampling,
    get_image_size,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]],
        do_resize: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
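
# Illustrative usage (added; "GLPNImageProcessor" reflects the reconstruction above):
#
#     processor = GLPNImageProcessor(size_divisor=32)
#     batch = processor(images=pil_images, return_tensors="np")
#
# Heights and widths are floored to multiples of 32 before rescaling to [0, 1].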
import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu


enable_full_determinism()


@slow
@require_torch_gpu
class StableDiffusionKDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()


def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle


def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx
        )
    return current_row


def calculate_current_element(
    triangle: list[list[int]],
    current_row: list[int],
    current_row_idx: int,
    current_col_idx: int,
) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt


def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0"
        )
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result


def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")

    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
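
# Worked example (illustrative): generate_pascal_triangle(3) and
# generate_pascal_triangle_optimized(3) both return [[1], [1, 1], [1, 2, 1]].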
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_tf_available,
    is_torch_available,
)


_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
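
# Note on the lazy-module pattern above (illustrative, added): at type-checking
# time the real symbols are imported directly, while at runtime a statement like
#
#     from transformers.models.speech_to_text import Speech2TextConfig
#
# only triggers the import of configuration_speech_to_text when the attribute
# is first accessed, keeping `import transformers` cheap.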
import os
from itertools import chain
from random import randrange, shuffle

import pytest

from .sol1 import PokerHand

SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 14]),
('''2H 5D 3C AS 5S''', False, [14, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [14, 13, 12, 11, 10]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 23),
('''JH 9H TH KH QH''', 22),
('''JC KH JS JD JH''', 21),
('''KH KC 3S 3H 3D''', 20),
('''8C 9C 5C 3C TC''', 19),
('''JS QS 9H TS KH''', 18),
('''7C 7S KH 2H 7H''', 17),
('''3C KH 5D 5S KH''', 16),
('''QH 8H KD JH 8S''', 15),
('''2D 6D 9D TH 7D''', 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Test that five high straights are compared correctly.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Multiple calls to five_high_straight function should still return True
    # and shouldn't mutate the list in every call other than the first.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler
    # Testing from poker_hands.txt file
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: float) -> float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
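

# Illustrative usage (not part of the original file):
#
#     p = Polynomial(2, [1, 2, 3])  # represents 3x^2 + 2x + 1
#     assert p.evaluate(2) == 17    # 3*4 + 2*2 + 1
#     assert str(p.derivative()) == "6x + 2"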
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}


class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        enable_pronunciation=True,
        enable_shape=True,
        pronunciation_embed_dim=768,
        pronunciation_vocab_size=910,
        shape_embed_dim=512,
        shape_vocab_size=24858,
        concat_input=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 658 | 1 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_snake_case = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
_snake_case = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = create_model(
"HTSAT-tiny" , "roberta" , _lowerCamelCase , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=_lowerCamelCase , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : List[Any] = r".*sequential.(\d+).*"
_lowerCAmelCase : Optional[int] = r".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
_lowerCAmelCase : Union[str, Any] = key.replace(_lowerCamelCase , _lowerCamelCase )
if re.match(_lowerCamelCase , _lowerCamelCase ):
# replace sequential layers with list
_lowerCAmelCase : Optional[Any] = re.match(_lowerCamelCase , _lowerCamelCase ).group(1 )
_lowerCAmelCase : Tuple = key.replace(F"sequential.{sequential_layer}." , F"layers.{int(_lowerCamelCase )//3}.linear." )
elif re.match(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : str = int(re.match(_lowerCamelCase , _lowerCamelCase ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
_lowerCAmelCase : Optional[Any] = 1 if projecton_layer == 0 else 2
_lowerCAmelCase : Any = key.replace(F"_projection.{projecton_layer}." , F"_projection.linear{transformers_projection_layer}." )
if "audio" and "qkv" in key:
# split qkv into query key and value
_lowerCAmelCase : int = value
_lowerCAmelCase : str = mixed_qkv.size(0 ) // 3
_lowerCAmelCase : Optional[int] = mixed_qkv[:qkv_dim]
_lowerCAmelCase : Tuple = mixed_qkv[qkv_dim : qkv_dim * 2]
_lowerCAmelCase : Dict = mixed_qkv[qkv_dim * 2 :]
_lowerCAmelCase : Tuple = query_layer
_lowerCAmelCase : Optional[int] = key_layer
_lowerCAmelCase : Optional[Any] = value_layer
else:
_lowerCAmelCase : str = value
return model_state_dict
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Any = init_clap(_lowerCamelCase , enable_fusion=_lowerCamelCase )
clap_model.eval()
_lowerCAmelCase : Tuple = clap_model.state_dict()
_lowerCAmelCase : List[Any] = rename_state_dict(_lowerCamelCase )
_lowerCAmelCase : List[str] = ClapConfig()
_lowerCAmelCase : Tuple = enable_fusion
_lowerCAmelCase : str = ClapModel(_lowerCamelCase )
# ignore the spectrogram embedding layer
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
transformers_config.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
_snake_case = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 658 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = tempfile.mkdtemp()
_lowerCAmelCase : Optional[int] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"的",
"价",
"格",
"是",
"15",
"便",
"alex",
"##andra",
",",
"。",
"-",
"t",
"shirt",
]
_lowerCAmelCase : Tuple = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
_lowerCAmelCase : Optional[int] = {
"do_resize": True,
"size": {"height": 224, "width": 224},
"do_center_crop": True,
"crop_size": {"height": 18, "width": 18},
"do_normalize": True,
"image_mean": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"image_std": [0.26_862_954, 0.26_130_258, 0.27_577_711],
"do_convert_rgb": True,
}
_lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname, __a)
with open(self.image_processor_file, "w", encoding="utf-8") as fp:
json.dump(__a, __a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self, **__a):
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
_lowerCAmelCase : Dict = [Image.fromarray(np.moveaxis(__a, 0, -1)) for x in image_inputs]
return image_inputs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCAmelCase : str = self.get_image_processor()
_lowerCAmelCase : Optional[Any] = ChineseCLIPProcessor(tokenizer=__a, image_processor=__a)
processor_slow.save_pretrained(self.tmpdirname)
_lowerCAmelCase : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=__a)
_lowerCAmelCase : Dict = ChineseCLIPProcessor(tokenizer=__a, image_processor=__a)
processor_fast.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, __a)
self.assertIsInstance(processor_fast.tokenizer, __a)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, __a)
self.assertIsInstance(processor_fast.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : str = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
_lowerCAmelCase : Any = self.get_image_processor(do_normalize=__a)
_lowerCAmelCase : Optional[int] = ChineseCLIPProcessor.from_pretrained(
self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=__a)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, __a)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : List[str] = ChineseCLIPProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : int = self.prepare_image_inputs()
_lowerCAmelCase : List[Any] = image_processor(__a, return_tensors="np")
_lowerCAmelCase : Tuple = processor(images=__a, return_tensors="np")
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1E-2)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Any = ChineseCLIPProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Optional[Any] = "Alexandra,T-shirt的价格是15便士。"
_lowerCAmelCase : str = processor(text=__a)
_lowerCAmelCase : int = tokenizer(__a)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = ChineseCLIPProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Union[str, Any] = "Alexandra,T-shirt的价格是15便士。"
_lowerCAmelCase : Dict = self.prepare_image_inputs()
_lowerCAmelCase : Optional[Any] = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])
# test if it raises when no input is passed
with pytest.raises(__a):
processor()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.get_image_processor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ChineseCLIPProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[Any] = processor.batch_decode(__a)
_lowerCAmelCase : int = tokenizer.batch_decode(__a)
self.assertListEqual(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ChineseCLIPProcessor(tokenizer=__a, image_processor=__a)
_lowerCAmelCase : Optional[int] = "Alexandra,T-shirt的价格是15便士。"
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Dict = processor(text=__a, images=__a)
self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 658 |
def A ( _lowerCamelCase ):
'''simple docstring'''
if length <= 0 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 | 1 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Dict = OmegaConf.load(_lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) )
return config
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if conf_path is None:
_lowerCAmelCase : Union[str, Any] = "./model_checkpoints/vqgan_only.yaml"
_lowerCAmelCase : Tuple = load_config(_lowerCamelCase , display=_lowerCamelCase )
_lowerCAmelCase : str = VQModel(**config.model.params )
if ckpt_path is None:
_lowerCAmelCase : Optional[int] = "./model_checkpoints/vqgan_only.pt"
_lowerCAmelCase : int = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
if ".ckpt" in ckpt_path:
_lowerCAmelCase : List[Any] = sd["state_dict"]
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
model.to(_lowerCamelCase )
del sd
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = model.encode(_lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
_lowerCAmelCase : int = model.decode(_lowerCamelCase )
return xrec
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = string.rsplit("." , 1 )
if reload:
_lowerCAmelCase : Dict = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def A ( _lowerCamelCase ):
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : str = instantiate_from_config(_lowerCamelCase )
if sd is not None:
model.load_state_dict(_lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if ckpt:
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" )
_lowerCAmelCase : int = pl_sd["global_step"]
print(F"loaded model from global step {global_step}." )
else:
_lowerCAmelCase : Optional[int] = {"state_dict": None}
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"]
return model, global_step
| 658 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( _lowerCamelCase ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = features[:, labels == i]
_lowerCAmelCase : Dict = data.mean(1 )
# Centralize the data of class i
_lowerCAmelCase : Union[str, Any] = data - column_reshape(_lowerCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : int = np.dot(_lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = features.mean(1 )
_lowerCAmelCase : List[str] = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = features[:, labels == i]
_lowerCAmelCase : Optional[Any] = data.shape[1]
_lowerCAmelCase : Optional[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if features.any():
_lowerCAmelCase : List[Any] = features.mean(1 )
# Center the dataset
_lowerCAmelCase : List[Any] = features - np.reshape(_lowerCamelCase , (data_mean.size, 1) )
_lowerCAmelCase : Optional[Any] = np.dot(_lowerCamelCase , centered_data.T ) / features.shape[1]
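# eigendecomposition of the symmetric covariance matrix; eigh returns eigenvalues in ascending order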
_lowerCAmelCase , _lowerCAmelCase : List[Any] = np.linalg.eigh(_lowerCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCAmelCase : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCAmelCase : List[Any] = np.dot(filtered_eigenvectors.T , _lowerCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert classes > dimensions
# Check if features have already been loaded
if features.any():
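# generalized eigenproblem: between-class scatter against within-class scatter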
_lowerCAmelCase , _lowerCAmelCase : List[str] = eigh(
covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
_lowerCAmelCase : List[str] = eigenvectors[:, ::-1][:, :dimensions]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = np.linalg.svd(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = svd_matrix[:, 0:dimensions]
_lowerCAmelCase : str = np.dot(filtered_svd_matrix.T , _lowerCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCAmelCase : List[Any] = np.array([0, 0, 0, 1, 1] )
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Union[str, Any] = linear_discriminant_analysis(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : List[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Tuple = principal_component_analysis(_lowerCamelCase , _lowerCamelCase )
if not np.allclose(_lowerCamelCase , _lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
def A ( _lowerCamelCase = 10 , _lowerCamelCase = 22 ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = range(1 , _lowerCamelCase )
_lowerCAmelCase : Optional[int] = range(1 , _lowerCamelCase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
| 658 |
import requests
from bsa import BeautifulSoup
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , params=_lowerCamelCase ).content , "html.parser" )
_lowerCAmelCase : Any = soup.find("div" , attrs={"class": "gs_ri"} )
_lowerCAmelCase : str = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_snake_case = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _lowerCamelCase ):
'''simple docstring'''
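# map an OpenAI Jukebox weight name onto the transformers naming scheme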
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle mismatched shapes
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 |
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[str] = {1: 1}
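# counters memoizes the Collatz chain length for every start value computed so far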
for inputa in range(2 , _lowerCamelCase ):
_lowerCAmelCase : int = 0
_lowerCAmelCase : Any = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCAmelCase : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCAmelCase : Tuple = counter
if counter > pre_counter:
_lowerCAmelCase : Union[str, Any] = inputa
_lowerCAmelCase : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 658 | 1 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = ['pixel_values']
def __init__( self, __a = True, __a = None, __a = PILImageResampling.BILINEAR, __a = True, __a = 1 / 255, __a = True, __a = None, __a = True, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : int = size if size is not None else {"shortest_edge": 224}
_lowerCAmelCase : Optional[int] = get_size_dict(__a, default_to_square=__a)
_lowerCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {"height": 256, "width": 256}
_lowerCAmelCase : Dict = get_size_dict(__a, param_name="crop_size")
_lowerCAmelCase : Any = do_resize
_lowerCAmelCase : Optional[int] = size
_lowerCAmelCase : int = resample
_lowerCAmelCase : Union[str, Any] = do_rescale
_lowerCAmelCase : int = rescale_factor
_lowerCAmelCase : str = do_center_crop
_lowerCAmelCase : Any = crop_size
_lowerCAmelCase : str = do_flip_channel_order
def snake_case__ ( self, __a, __a, __a = PIL.Image.BILINEAR, __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = get_size_dict(__a, default_to_square=__a)
if "shortest_edge" not in size:
raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
_lowerCAmelCase : int = get_resize_output_image_size(__a, size=size["shortest_edge"], default_to_square=__a)
return resize(__a, size=__a, resample=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a, __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Any = get_size_dict(__a)
if "height" not in size or "width" not in size:
raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
return center_crop(__a, size=(size["height"], size["width"]), data_format=__a, **__a)
def snake_case__ ( self, __a, __a, __a = None, **__a, ):
'''simple docstring'''
return rescale(__a, scale=__a, data_format=__a, **__a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
return flip_channel_order(__a, data_format=__a)
def snake_case__ ( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = None, __a = ChannelDimension.FIRST, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Any = resample if resample is not None else self.resample
_lowerCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : List[str] = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
_lowerCAmelCase : Optional[int] = size if size is not None else self.size
_lowerCAmelCase : List[Any] = get_size_dict(__a, default_to_square=__a)
_lowerCAmelCase : List[Any] = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : Union[str, Any] = get_size_dict(__a, param_name="crop_size")
_lowerCAmelCase : Optional[int] = make_list_of_images(__a)
if not valid_images(__a):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
# All transformations expect numpy arrays.
_lowerCAmelCase : int = [to_numpy_array(__a) for image in images]
if do_resize:
_lowerCAmelCase : int = [self.resize(image=__a, size=__a, resample=__a) for image in images]
if do_center_crop:
_lowerCAmelCase : List[Any] = [self.center_crop(image=__a, size=__a) for image in images]
if do_rescale:
_lowerCAmelCase : Union[str, Any] = [self.rescale(image=__a, scale=__a) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
_lowerCAmelCase : List[str] = [self.flip_channel_order(image=__a) for image in images]
_lowerCAmelCase : Union[str, Any] = [to_channel_dimension_format(__a, __a) for image in images]
_lowerCAmelCase : Tuple = {"pixel_values": images}
return BatchFeature(data=__a, tensor_type=__a)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : List[str] = outputs.logits
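# logits have shape (batch_size, num_labels, height, width)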
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__a) != len(__a):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits")
if is_torch_tensor(__a):
_lowerCAmelCase : Tuple = target_sizes.numpy()
_lowerCAmelCase : int = []
for idx in range(len(__a)):
_lowerCAmelCase : int = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=__a)
_lowerCAmelCase : Tuple = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(__a)
else:
_lowerCAmelCase : List[str] = logits.argmax(dim=1)
_lowerCAmelCase : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 658 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _lowerCamelCase ):
'''simple docstring'''
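# map an OpenAI Jukebox weight name onto the transformers naming scheme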
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle mismatched shapes
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 | 1 |
print((lambda quine: quine % quine)("print((lambda quine: quine %% quine)(%r))"))
| 658 |
import numpy as np
from cva import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filteraD, imread, imshow, waitKey
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if (ksize % 2) == 0:
_lowerCAmelCase : str = ksize + 1
_lowerCAmelCase : List[str] = np.zeros((ksize, ksize) , dtype=np.floataa )
# compute each value of the kernel
for y in range(_lowerCamelCase ):
for x in range(_lowerCamelCase ):
# distance from center
_lowerCAmelCase : int = x - ksize // 2
_lowerCAmelCase : Dict = y - ksize // 2
# degree to radiant
_lowerCAmelCase : List[Any] = theta / 180 * np.pi
_lowerCAmelCase : int = np.cos(_theta )
_lowerCAmelCase : Optional[int] = np.sin(_theta )
# get kernel x
_lowerCAmelCase : int = cos_theta * px + sin_theta * py
# get kernel y
_lowerCAmelCase : str = -sin_theta * px + cos_theta * py
# fill kernel with the Gabor function: exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2 * pi * x' / lambd + psi)
_lowerCAmelCase : Union[str, Any] = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread("../image_data/lena.jpg")
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply kernels at multiple orientations to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 30, 60, 90, 120, 150]:
_snake_case = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
out += filteraD(gray, CV_8UC3, kernel_aa)
_snake_case = out / out.max() * 255
_snake_case = out.astype(np.uinta)
imshow("Original", gray)
imshow("Gabor filter with 20x20 mask and 6 directions", out)
waitKey(0)
| 658 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = "▁"
_snake_case = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = BigBirdTokenizer
lowerCamelCase__ = BigBirdTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : List[str] = self.tokenizer_class(__a, keep_accents=__a)
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = "<s>"
_lowerCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a), __a)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a), __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<unk>")
self.assertEqual(vocab_keys[1], "<s>")
self.assertEqual(vocab_keys[-1], "[MASK]")
self.assertEqual(len(__a), 1004)
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 1000)
def snake_case__ ( self):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = self.get_rust_tokenizer()
_lowerCAmelCase : List[Any] = "I was born in 92000, and this is falsé."
_lowerCAmelCase : Optional[int] = tokenizer.tokenize(__a)
_lowerCAmelCase : Any = rust_tokenizer.tokenize(__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : int = tokenizer.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : Dict = rust_tokenizer.encode(__a, add_special_tokens=__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : str = self.get_rust_tokenizer()
_lowerCAmelCase : List[Any] = tokenizer.encode(__a)
_lowerCAmelCase : Any = rust_tokenizer.encode(__a)
self.assertListEqual(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = BigBirdTokenizer(__a, keep_accents=__a)
_lowerCAmelCase : List[Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(__a, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a), [285, 46, 10, 170, 382], )
_lowerCAmelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
__a, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
], )
_lowerCAmelCase : Dict = tokenizer.convert_tokens_to_ids(__a)
self.assertListEqual(
__a, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], )
_lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(__a)
self.assertListEqual(
__a, [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
], )
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = "Hello World!"
_lowerCAmelCase : Any = [65, 1_8536, 2260, 101, 66]
self.assertListEqual(__a, self.big_tokenizer.encode(__a))
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
_lowerCAmelCase : Optional[Any] = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(__a, self.big_tokenizer.encode(__a))
@require_torch
@slow
def snake_case__ ( self):
'''simple docstring'''
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
_lowerCAmelCase : Any = list(self.big_tokenizer.get_vocab().keys())[:10]
_lowerCAmelCase : str = " ".join(__a)
_lowerCAmelCase : int = self.big_tokenizer.encode_plus(__a, return_tensors="pt", return_token_type_ids=__a)
_lowerCAmelCase : List[Any] = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence], return_tensors="pt", return_token_type_ids=__a)
_lowerCAmelCase : Tuple = BigBirdConfig(attention_type="original_full")
_lowerCAmelCase : List[Any] = BigBirdModel(__a)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__a)
model(**__a)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
_lowerCAmelCase : Tuple = tokenizer.decode(tokenizer("Paris is the [MASK].").input_ids)
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]")
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = {"input_ids": [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a, model_name="google/bigbird-roberta-base", revision="215c99f1600e06f83acce68422f2035b2b5c3510", )
| 658 |
def binary_insertion_sort(collection):
    """Sort a mutable sequence in place using binary insertion sort and return it."""
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary-search the already-sorted prefix collection[:i] for the insertion index.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift everything after the insertion point one slot to the right.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
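
# Quick sanity check (illustrative, not part of the original script):
#     binary_insertion_sort([5, 2, 4, 1, 3]) -> [1, 2, 3, 4, 5]
# The binary search cuts the comparisons per element to O(log i), but the
# element shifts keep the overall worst-case running time at O(n^2).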
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
| 658 | 1 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)

failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files_failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files_failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files_failed:
                    files_failed[data[0]] = [data[1:]]
                else:
                    files_failed[data[0]] += [data[1:]]
                failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files_failed[file])])
            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files_failed.append(files_failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
        date_report = {
            "type": "context",
            "elements": [
                {
                    "type": "plain_text",
                    "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
                }
            ],
        }
        payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
        for failed_file in all_files_failed:
            for test_location, test_failures in failed_file.items():
                # Keep only the first instance of the test name
                test_class = ""
                for i, row in enumerate(test_failures):
                    if row[0] != test_class:
                        test_class = row[0]
                    else:
                        row[0] = ""
                payload = {
                    "type": "section",
                    "text": {
                        "type": "mrkdwn",
                        "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                    },
                }
                client.chat_postMessage(
                    channel="#accelerate-ci-daily",
                    thread_ts=ts,
                    blocks=[payload],
                )
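
# Operational note: this script expects pytest JSON-report style .log files in
# the working directory and, when TEST_TYPE is set, a SLACK_API_TOKEN plus the
# GITHUB_REPOSITORY / GITHUB_RUN_ID variables used to build the report links.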
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
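

# Minimal usage sketch (illustrative only, not part of the upstream module):
if __name__ == "__main__":
    config = FocalNetConfig()
    print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']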
| 658 | 1 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    """Reshape a row Numpy array into a column Numpy array."""
    return input_array.reshape((input_array.size, 1))


def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix inside each class."""
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)

    return covariance_sum / features.shape[1]


def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    """Compute the covariance matrix between multiple classes."""
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )

    return covariance_sum / features.shape[1]


def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    """Project the dataset onto its first `dimensions` principal components."""
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def linear_discriminant_analysis(
    features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int
) -> np.ndarray:
    """Project the dataset onto `dimensions` linear discriminants."""
    # Check if the dimension desired is less than the number of classes
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError


def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes"
            )
    assert error_info.type is AssertionError


def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])

    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError


if __name__ == "__main__":
    import doctest

    doctest.testmod()
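
# Worked example (illustrative): a dataset with 3 features and 3 samples
# projected onto its first two principal components keeps the sample count:
#     principal_component_analysis(np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]), 2).shape == (2, 3)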
| 658 |
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Count the ordered combinations (compositions) of items in `array` summing to `target`."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Top-down memoised variant of combination_sum_iv."""

    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up tabulated variant: dp_array[i] counts the compositions summing to i."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
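    # The demo above prints 9: the compositions of 5 over [1, 2, 5] are
    # 1+1+1+1+1, the four orderings of 1+1+1+2, the three orderings of 1+2+2,
    # and 5 itself.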
| 658 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
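
# The lazy-module pattern above keeps importing this package cheap: the heavy
# torch-backed symbols are only imported when an attribute is first accessed,
# with _LazyModule resolving names through _import_structure.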
| 658 |
import string
def decrypt(message: str) -> None:
    """Brute-force a Caesar cipher by printing the decryption for every possible key."""
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
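
# Example (illustrative): for the ciphertext "KHOOR" the loop prints all 26
# candidate shifts; key 3 yields the plaintext "HELLO".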
| 658 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
def __init__( self, __a, __a=2, __a=True, __a=False, __a=10, __a=3, __a=32 * 4, __a=32 * 6, __a=4, __a=32, ):
'''simple docstring'''
_lowerCAmelCase : Any = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : List[Any] = is_training
_lowerCAmelCase : Any = use_auxiliary_loss
_lowerCAmelCase : Any = num_queries
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Optional[Any] = min_size
_lowerCAmelCase : Optional[Any] = max_size
_lowerCAmelCase : int = num_labels
_lowerCAmelCase : Any = mask_feature_size
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
__a)
_lowerCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size], device=__a)
_lowerCAmelCase : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=__a) > 0.5
).float()
_lowerCAmelCase : Optional[int] = (torch.rand((self.batch_size, self.num_labels), device=__a) > 0.5).long()
_lowerCAmelCase : Any = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def snake_case__ ( self):
'''simple docstring'''
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1], ), decoder_config=DetrConfig(
decoder_ffn_dim=128, num_queries=self.num_queries, decoder_attention_heads=2, d_model=self.mask_feature_size, ), mask_feature_size=self.mask_feature_size, fpn_feature_size=self.mask_feature_size, num_channels=self.num_channels, num_labels=self.num_labels, )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
_lowerCAmelCase : int = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = output.encoder_hidden_states
_lowerCAmelCase : List[Any] = output.pixel_decoder_hidden_states
_lowerCAmelCase : Dict = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__a), len(config.backbone_config.depths))
self.parent.assertTrue(len(__a), len(config.backbone_config.depths))
self.parent.assertTrue(len(__a), config.decoder_config.decoder_layers)
def snake_case__ ( self, __a, __a, __a, __a=False):
'''simple docstring'''
with torch.no_grad():
_lowerCAmelCase : List[str] = MaskFormerModel(config=__a)
model.to(__a)
model.eval()
_lowerCAmelCase : List[Any] = model(pixel_values=__a, pixel_mask=__a)
_lowerCAmelCase : str = model(__a, output_hidden_states=__a)
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape, (self.batch_size, self.num_queries, self.mask_feature_size), )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(__a, __a)
def snake_case__ ( self, __a, __a, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = MaskFormerForInstanceSegmentation(config=__a)
model.to(__a)
model.eval()
def comm_check_on_output(__a):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape, (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4), )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
_lowerCAmelCase : List[Any] = model(pixel_values=__a, pixel_mask=__a)
_lowerCAmelCase : int = model(__a)
comm_check_on_output(__a)
_lowerCAmelCase : Any = model(
pixel_values=__a, pixel_mask=__a, mask_labels=__a, class_labels=__a)
comm_check_on_output(__a)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = MaskFormerModelTester(self)
_lowerCAmelCase : Union[str, Any] = ConfigTester(self, config_class=__a, has_text_modality=__a)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__a, **__a, output_hidden_states=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*__a)
@unittest.skip(reason="MaskFormer does not use inputs_embeds")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer is not a generative model")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip(reason="MaskFormer does not use token embeddings")
def snake_case__ ( self):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
def snake_case__ ( self):
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Any = model_class(__a)
_lowerCAmelCase : List[Any] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Any = [*signature.parameters.keys()]
_lowerCAmelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1], __a)
@slow
def snake_case__ ( self):
'''simple docstring'''
for model_name in ["facebook/maskformer-swin-small-coco"]:
_lowerCAmelCase : str = MaskFormerModel.from_pretrained(__a)
self.assertIsNotNone(__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = (self.model_tester.min_size,) * 2
_lowerCAmelCase : Any = {
"pixel_values": torch.randn((2, 3, *size), device=__a),
"mask_labels": torch.randn((2, 10, *size), device=__a),
"class_labels": torch.zeros(2, 10, device=__a).long(),
}
_lowerCAmelCase : str = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(__a)
_lowerCAmelCase : Tuple = model(**__a)
self.assertTrue(outputs.loss is not None)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(__a, **__a, output_hidden_states=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Any = model_class(__a).to(__a)
_lowerCAmelCase : Tuple = model(**__a, output_attentions=__a)
self.assertTrue(outputs.attentions is not None)
def snake_case__ ( self):
'''simple docstring'''
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
_lowerCAmelCase : Any = self.all_model_classes[1]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : List[str] = model_class(__a)
model.to(__a)
model.train()
_lowerCAmelCase : Optional[int] = model(__a, mask_labels=__a, class_labels=__a).loss
loss.backward()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.all_model_classes[1]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
_lowerCAmelCase : Dict = True
_lowerCAmelCase : str = True
_lowerCAmelCase : List[Any] = model_class(__a)
model.to(__a)
model.train()
_lowerCAmelCase : Optional[Any] = model(__a, mask_labels=__a, class_labels=__a)
_lowerCAmelCase : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCAmelCase : Optional[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
        # unlike the original implementation, we set requires_grad=True on inputs_embeds (see line 2152 there)
_lowerCAmelCase : Optional[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCAmelCase : Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__a)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
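

# Note: the fixture above is the standard COCO cats-on-a-couch test image that
# the hard-coded expected tensor slices in the integration tests below were
# computed against.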
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
@cached_property
def snake_case__ ( self):
'''simple docstring'''
return (
MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
if is_vision_available()
else None
)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(__a)
_lowerCAmelCase : str = self.default_image_processor
_lowerCAmelCase : List[Any] = prepare_img()
_lowerCAmelCase : Union[str, Any] = image_processor(__a, return_tensors="pt").to(__a)
_lowerCAmelCase : Tuple = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(__a, (1, 3, 800, 1088))
with torch.no_grad():
_lowerCAmelCase : int = model(**__a)
_lowerCAmelCase : Optional[Any] = torch.tensor(
[[-0.0_482, 0.9_228, 0.4_951], [-0.2_547, 0.8_017, 0.8_527], [-0.0_069, 0.3_385, -0.0_089]]).to(__a)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3], __a, atol=__a))
_lowerCAmelCase : List[Any] = torch.tensor(
[[-0.8_422, -0.8_434, -0.9_718], [-1.0_144, -0.5_565, -0.4_195], [-1.0_038, -0.4_484, -0.1_961]]).to(__a)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], __a, atol=__a))
_lowerCAmelCase : Optional[int] = torch.tensor(
[[0.2_852, -0.0_159, 0.9_735], [0.6_254, 0.1_858, 0.8_529], [-0.0_680, -0.4_116, 1.8_413]]).to(__a)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3], __a, atol=__a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
.to(__a)
.eval()
)
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : List[Any] = prepare_img()
_lowerCAmelCase : Any = image_processor(__a, return_tensors="pt").to(__a)
_lowerCAmelCase : Union[str, Any] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(__a, (1, 3, 800, 1088))
with torch.no_grad():
_lowerCAmelCase : int = model(**__a)
# masks_queries_logits
_lowerCAmelCase : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
_lowerCAmelCase : List[str] = [
[-1.3_737_124, -1.7_724_937, -1.9_364_233],
[-1.5_977_281, -1.9_867_939, -2.1_523_695],
[-1.5_795_398, -1.9_269_832, -2.093_942],
]
_lowerCAmelCase : Tuple = torch.tensor(__a).to(__a)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], __a, atol=__a))
# class_queries_logits
_lowerCAmelCase : int = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
_lowerCAmelCase : List[str] = torch.tensor(
[
[1.6512E00, -5.2572E00, -3.3519E00],
[3.6169E-02, -5.9025E00, -2.9313E00],
[1.0766E-04, -7.7630E00, -5.1263E00],
]).to(__a)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], __a, atol=__a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
.to(__a)
.eval()
)
_lowerCAmelCase : List[Any] = self.default_image_processor
_lowerCAmelCase : int = prepare_img()
_lowerCAmelCase : Dict = image_processor(__a, return_tensors="pt").to(__a)
_lowerCAmelCase : List[str] = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(__a, (1, 3, 800, 1088))
with torch.no_grad():
_lowerCAmelCase : str = model(**__a)
# masks_queries_logits
_lowerCAmelCase : Optional[int] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape, (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4), )
_lowerCAmelCase : List[Any] = [[-0.9_046, -2.6_366, -4.6_062], [-3.4_179, -5.7_890, -8.8_057], [-4.9_179, -7.6_560, -10.7_711]]
_lowerCAmelCase : int = torch.tensor(__a).to(__a)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], __a, atol=__a))
# class_queries_logits
_lowerCAmelCase : int = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
_lowerCAmelCase : Dict = torch.tensor(
[[4.7_188, -3.2_585, -2.8_857], [6.6_871, -2.9_181, -1.2_487], [7.2_449, -2.2_764, -2.1_874]]).to(__a)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], __a, atol=__a))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = (
MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
.to(__a)
.eval()
)
_lowerCAmelCase : Tuple = self.default_image_processor
_lowerCAmelCase : Optional[Any] = image_processor(
[np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))], segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)], return_tensors="pt", )
_lowerCAmelCase : int = inputs["pixel_values"].to(__a)
_lowerCAmelCase : List[Any] = [el.to(__a) for el in inputs["mask_labels"]]
_lowerCAmelCase : Dict = [el.to(__a) for el in inputs["class_labels"]]
with torch.no_grad():
_lowerCAmelCase : Optional[Any] = model(**__a)
self.assertTrue(outputs.loss is not None)
| 658 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dictionary of headline COVID-19 statistics scraped from worldometers.info."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
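
# Caveat: this scraper depends on worldometers.info's current page structure;
# if the site changes its h1/div/span markup, the selectors above will need to
# be updated.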
| 658 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"xlm-roberta-base": 512,
"xlm-roberta-large": 512,
"xlm-roberta-large-finetuned-conll02-dutch": 512,
"xlm-roberta-large-finetuned-conll02-spanish": 512,
"xlm-roberta-large-finetuned-conll03-english": 512,
"xlm-roberta-large-finetuned-conll03-german": 512,
}
class XLMRobertaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self, __a, __a="<s>", __a="</s>", __a="</s>", __a="<s>", __a="<unk>", __a="<pad>", __a="<mask>", __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Dict = AddedToken(__a, lstrip=__a, rstrip=__a) if isinstance(__a, __a) else mask_token
_lowerCAmelCase : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__a, eos_token=__a, unk_token=__a, sep_token=__a, cls_token=__a, pad_token=__a, mask_token=__a, sp_model_kwargs=self.sp_model_kwargs, **__a, )
_lowerCAmelCase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(str(__a))
_lowerCAmelCase : Dict = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
_lowerCAmelCase : Optional[int] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_lowerCAmelCase : Union[str, Any] = 1
_lowerCAmelCase : List[Any] = len(self.sp_model) + self.fairseq_offset
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = self.__dict__.copy()
_lowerCAmelCase : Tuple = None
_lowerCAmelCase : int = self.sp_model.serialized_model_proto()
return state
def __setstate__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs"):
_lowerCAmelCase : List[str] = {}
_lowerCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : Tuple = [self.cls_token_id]
_lowerCAmelCase : str = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case__ ( self, __a, __a = None, __a = False):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a, token_ids_a=__a, already_has_special_tokens=__a)
if token_ids_a is None:
return [1] + ([0] * len(__a)) + [1]
return [1] + ([0] * len(__a)) + [1, 1] + ([0] * len(__a)) + [1]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Dict = [self.sep_token_id]
_lowerCAmelCase : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
@property
def snake_case__ ( self):
'''simple docstring'''
return len(self.sp_model) + self.fairseq_offset + 1 # Add the <mask> token
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {self.convert_ids_to_tokens(__a): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def snake_case__ ( self, __a):
'''simple docstring'''
return self.sp_model.encode(__a, out_type=__a)
def snake_case__ ( self, __a):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_lowerCAmelCase : Dict = self.sp_model.PieceToId(__a)
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case__ ( self, __a):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int = "".join(__a).replace(__a, " ").strip()
return out_string
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if not os.path.isdir(__a):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
_lowerCAmelCase : Tuple = os.path.join(
__a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__a) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file, __a)
elif not os.path.isfile(self.vocab_file):
with open(__a, "wb") as fi:
_lowerCAmelCase : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(__a)
return (out_vocab_file,)
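
# Illustrative consequence of the fairseq alignment documented above: every raw
# sentencepiece id is shifted up by `fairseq_offset` (1) so that ids 0-3 can
# hold <s>, <pad>, </s> and <unk>; e.g. spm id 3 (",") becomes token id 4.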
| 658 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        """Represent a single-indeterminate polynomial; coefficients[i] multiplies x**i."""
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )

        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        """Add two polynomials, padding the shorter coefficient list."""
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        """Evaluate the polynomial at x = substitution."""
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "

            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)

        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        """Return the formal derivative of the polynomial."""
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        """Return an antiderivative of the polynomial with the given integration constant."""
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False

        if self.degree != polynomial_2.degree:
            return False

        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False

        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
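

# Usage sketch (illustrative):
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # coefficients are low-to-high: 1 + 2x + 3x^2
    print(p)               # 3x^2 + 2x + 1
    print(p.evaluate(2))   # 17
    print(p.derivative())  # 6x + 2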
| 658 | 1 |
import argparse
import torch
from torch import nn
from transformers import Speech2TextConfig, Speech2TextForConditionalGeneration


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        if "transformer_layers" in key:
            s_dict[key.replace("transformer_layers", "layers")] = s_dict.pop(key)
        elif "subsample" in key:
            s_dict[key.replace("subsample", "conv")] = s_dict.pop(key)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_s2t_checkpoint_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    m2m_100 = torch.load(checkpoint_path, map_location="cpu")
    args = m2m_100["args"]
    state_dict = m2m_100["model"]
    lm_head_weights = state_dict["decoder.output_projection.weight"]

    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)

    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    tie_embeds = args.share_decoder_input_output_embed

    conv_kernel_sizes = [int(i) for i in args.conv_kernel_sizes.split(",")]
    config = Speech2TextConfig(
        vocab_size=vocab_size,
        max_source_positions=args.max_source_positions,
        max_target_positions=args.max_target_positions,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="relu",
        num_conv_layers=len(conv_kernel_sizes),
        conv_channels=args.conv_channels,
        conv_kernel_sizes=conv_kernel_sizes,
        input_feat_per_channel=args.input_feat_per_channel,
        input_channels=args.input_channels,
        tie_word_embeddings=tie_embeds,
        num_beams=5,
        max_length=200,
        use_cache=True,
        decoder_start_token_id=2,
        early_stopping=True,
    )

    model = Speech2TextForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}"
        )

    if tie_embeds:
        model.lm_head = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.lm_head.weight.data = lm_head_weights

    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) file.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_fairseq_s2t_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
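    # Example invocation (hypothetical paths):
    #   python convert_s2t_fairseq_to_tfms.py \
    #       --fairseq_path checkpoint.pt --pytorch_dump_folder_path ./s2t_model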
| 658 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
    "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}


class XLNetConfig(PretrainedConfig):
    model_type = "xlnet"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",  # Backward compatibility
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=32000,
        d_model=1024,
        n_layer=24,
        n_head=16,
        d_inner=4096,
        ff_activation="gelu",
        untie_r=True,
        attn_type="bi",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        dropout=0.1,
        mem_len=512,
        reuse_len=None,
        use_mems_eval=True,
        use_mems_train=False,
        bi_data=False,
        clamp_len=-1,
        same_length=False,
        summary_type="last",
        summary_use_proj=True,
        summary_activation="tanh",
        summary_last_dropout=0.1,
        start_n_top=5,
        end_n_top=5,
        pad_token_id=5,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.n_layer = n_layer
        self.n_head = n_head
        if d_model % n_head != 0:
            raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
        if "d_head" in kwargs:
            if kwargs["d_head"] != d_model // n_head:
                raise ValueError(
                    f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})"
                )
        self.d_head = d_model // n_head
        self.ff_activation = ff_activation
        self.d_inner = d_inner
        self.untie_r = untie_r
        self.attn_type = attn_type

        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.dropout = dropout
        self.mem_len = mem_len
        self.reuse_len = reuse_len
        self.bi_data = bi_data
        self.clamp_len = clamp_len
        self.same_length = same_length

        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_last_dropout = summary_last_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top

        self.bos_token_id = bos_token_id
        self.pad_token_id = pad_token_id
        self.eos_token_id = eos_token_id

        if "use_cache" in kwargs:
            warnings.warn(
                "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
                " instead.",
                FutureWarning,
            )
            use_mems_eval = kwargs["use_cache"]

        self.use_mems_eval = use_mems_eval
        self.use_mems_train = use_mems_train
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 658 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/vivit-b-16x2-kinetics400": (
        "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
    ),
    # See all Vivit models at https://huggingface.co/models?filter=vivit
}


class VivitConfig(PretrainedConfig):
    model_type = "vivit"

    def __init__(
        self,
        image_size=224,
        num_frames=32,
        tubelet_size=[2, 16, 16],
        num_channels=3,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_fast",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-06,
        qkv_bias=True,
        **kwargs,
    ):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
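

# Illustrative default instantiation: 32 frames of 224x224 video, each cut
# into 2x16x16 tubelets.
if __name__ == "__main__":
    config = VivitConfig()
    print(config.num_frames, config.tubelet_size)  # 32 [2, 16, 16]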
| 658 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price after applying the given tax rate."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
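    # Expected output (the second value is subject to float rounding):
    # price_plus_tax(100, 0.25) = 125.0
    # price_plus_tax(125.50, 0.05) = 131.775 (approximately)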
| 658 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConformerConfig,
WavaVecaConformerForCTC,
WavaVecaConformerForPreTraining,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
"self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
"self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
"self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
"self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
"self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
"self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
"self_attn.rotary_emb": "encoder.embed_positions",
"self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
"conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
"conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
"conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
"conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
"conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
"ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
"ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
"ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
"ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
"ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
"ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
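
# MAPPING translates fairseq parameter-name fragments (keys) to HF Transformers
# module paths (values); the "*" placeholder is filled with the encoder layer
# index during weight conversion.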
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for attribute in key.split("." ):
_lowerCAmelCase : int = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
_lowerCAmelCase : Dict = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
_lowerCAmelCase : str = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}" )
if weight_type == "weight":
_lowerCAmelCase : Optional[Any] = value
elif weight_type == "weight_g":
_lowerCAmelCase : List[str] = value
elif weight_type == "weight_v":
_lowerCAmelCase : Dict = value
elif weight_type == "bias":
_lowerCAmelCase : int = value
elif weight_type == "running_mean":
_lowerCAmelCase : Optional[Any] = value
elif weight_type == "running_var":
_lowerCAmelCase : Tuple = value
elif weight_type == "num_batches_tracked":
_lowerCAmelCase : Tuple = value
elif weight_type == "inv_freq":
_lowerCAmelCase : List[Any] = value
else:
_lowerCAmelCase : Tuple = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Optional[int] = fairseq_model.state_dict()
_lowerCAmelCase : Dict = hf_model.wavaveca_conformer.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase : Tuple = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == "group" , )
_lowerCAmelCase : Tuple = True
else:
for key, mapped_key in MAPPING.items():
_lowerCAmelCase : Union[str, Any] = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
_lowerCAmelCase : str = True
if "*" in mapped_key:
_lowerCAmelCase : Union[str, Any] = name.split(_lowerCamelCase )[0].split("." )[-2]
_lowerCAmelCase : Tuple = mapped_key.replace("*" , _lowerCamelCase )
if "pos_bias_u" in name:
_lowerCAmelCase : Optional[Any] = None
elif "pos_bias_v" in name:
_lowerCAmelCase : List[Any] = None
elif "weight_g" in name:
_lowerCAmelCase : List[str] = "weight_g"
elif "weight_v" in name:
_lowerCAmelCase : Union[str, Any] = "weight_v"
elif "bias" in name:
_lowerCAmelCase : int = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
_lowerCAmelCase : Any = "weight"
elif "running_mean" in name:
_lowerCAmelCase : List[str] = "running_mean"
elif "inv_freq" in name:
_lowerCAmelCase : Any = "inv_freq"
elif "running_var" in name:
_lowerCAmelCase : List[str] = "running_var"
elif "num_batches_tracked" in name:
_lowerCAmelCase : Optional[int] = "num_batches_tracked"
else:
_lowerCAmelCase : int = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = full_name.split("conv_layers." )[-1]
_lowerCAmelCase : Any = name.split("." )
_lowerCAmelCase : Optional[int] = int(items[0] )
_lowerCAmelCase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
_lowerCAmelCase : Any = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
_lowerCAmelCase : Any = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
_lowerCAmelCase : Dict = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
_lowerCAmelCase : int = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=True ):
'''Convert a fairseq wav2vec2-conformer checkpoint (optionally fine-tuned) into a Transformers checkpoint.'''
if config_path is not None:
_lowerCAmelCase : int = WavaVecaConformerConfig.from_pretrained(_lowerCamelCase , hidden_act="swish" )
else:
_lowerCAmelCase : Union[str, Any] = WavaVecaConformerConfig()
if "rope" in checkpoint_path:
_lowerCAmelCase : List[Any] = "rotary"
if is_finetuned:
if dict_path:
_lowerCAmelCase : Dict = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase : Dict = target_dict.pad_index
_lowerCAmelCase : Optional[Any] = target_dict.bos_index
_lowerCAmelCase : List[Any] = target_dict.eos_index
_lowerCAmelCase : List[str] = len(target_dict.symbols )
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , "vocab.json" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
_lowerCAmelCase : List[str] = target_dict.indices
# fairseq has the <pad> and <s> switched
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : str = 1
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Optional[Any] = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCamelCase , )
_lowerCAmelCase : Dict = True if config.feat_extract_norm == "layer" else False
_lowerCAmelCase : Tuple = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
_lowerCAmelCase : str = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCAmelCase : str = WavaVecaConformerForCTC(_lowerCamelCase )
else:
_lowerCAmelCase : Dict = WavaVecaConformerForPreTraining(_lowerCamelCase )
if is_finetuned:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
_lowerCAmelCase : int = argparse.Namespace(task="audio_pretraining" )
_lowerCAmelCase : Dict = fairseq.tasks.setup_task(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
_snake_case = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
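# Example invocation (illustrative only — the script filename and all paths below are
# hypothetical, not taken from this file):
#   python convert_wav2vec2_conformer_checkpoint.py \
#       --checkpoint_path /path/to/fairseq/checkpoint.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer-hf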
| 658 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, dict):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
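# Usage note (illustrative; assumes this mirrors `UperNetConfig` in upstream
# `transformers`): constructing the config with no `backbone_config` logs a note and
# falls back to a default ResNet backbone over stages 1-4, as in __init__ above.
#     from transformers import UperNetConfig
#     cfg = UperNetConfig()
#     cfg.to_dict()["backbone_config"]["model_type"]  # -> "resnet"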
| 658 | 1 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def A ( _lowerCamelCase ):
'''Gather the shapes of all tensors in a nested dict/list/tuple tree.'''
_lowerCAmelCase : Union[str, Any] = []
if isinstance(_lowerCamelCase , dict ):
for v in tree.values():
shapes.extend(_fetch_dims(v ) )
elif isinstance(_lowerCamelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(t ) )
elif isinstance(_lowerCamelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = []
for d in reversed(_lowerCamelCase ):
idx.append(flat_idx % d )
_lowerCAmelCase : Any = flat_idx // d
return tuple(reversed(_lowerCamelCase ) )
@torch.jit.ignore
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None , ):
'''simple docstring'''
def reduce_edge_list(_lowerCamelCase ) -> None:
_lowerCAmelCase : Dict = True
for i in range(len(_lowerCamelCase ) ):
_lowerCAmelCase : Tuple = -1 * (i + 1)
l[reversed_idx] &= tally
_lowerCAmelCase : List[str] = l[reversed_idx]
if start_edges is None:
_lowerCAmelCase : List[Any] = [s == 0 for s in start]
reduce_edge_list(_lowerCamelCase )
if end_edges is None:
_lowerCAmelCase : Dict = [e == (d - 1) for e, d in zip(_lowerCamelCase , _lowerCamelCase )]
reduce_edge_list(_lowerCamelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(_lowerCamelCase ) == 0:
return [()]
elif len(_lowerCamelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
_lowerCAmelCase : List[Tuple[slice, ...]] = []
_lowerCAmelCase : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(_lowerCamelCase , _lowerCamelCase ):
if s == e:
path_list.append(slice(_lowerCamelCase , s + 1 ) )
else:
break
_lowerCAmelCase : Tuple[slice, ...] = tuple(_lowerCamelCase )
_lowerCAmelCase : str = len(_lowerCamelCase )
# start == end, and we're done
if divergence_idx == len(_lowerCamelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_lowerCAmelCase : List[Any] = start[divergence_idx]
return tuple(
path + (slice(_lowerCamelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
_lowerCAmelCase : Tuple = end[divergence_idx]
return tuple(
path + (slice(_lowerCamelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
_lowerCAmelCase : Dict = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = t.shape[:no_batch_dims]
_lowerCAmelCase : int = list(_flat_idx_to_idx(_lowerCamelCase , _lowerCamelCase ) )
# _get_minimal_slice_set is inclusive
_lowerCAmelCase : Union[str, Any] = list(_flat_idx_to_idx(flat_end - 1 , _lowerCamelCase ) )
# Get an ordered list of slices to perform
_lowerCAmelCase : List[str] = _get_minimal_slice_set(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , )
_lowerCAmelCase : Dict = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = None , _lowerCamelCase = False , ):
'''Run `layer` over the inputs in fixed-size chunks along the flattened batch dims to bound peak memory.'''
if not (len(_lowerCamelCase ) > 0):
raise ValueError("Must provide at least one input" )
_lowerCAmelCase : Tuple = [shape[:no_batch_dims] for shape in _fetch_dims(_lowerCamelCase )]
_lowerCAmelCase : Union[str, Any] = tuple([max(s ) for s in zip(*_lowerCamelCase )] )
def _prep_inputs(_lowerCamelCase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
_lowerCAmelCase : int = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
_lowerCAmelCase : Dict = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
_lowerCAmelCase : List[Any] = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
_lowerCAmelCase : Dict[str, Any] = tensor_tree_map(_prep_inputs , _lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = None
if _out is not None:
_lowerCAmelCase : List[Any] = tensor_tree_map(lambda t : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
_lowerCAmelCase : Union[str, Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
_lowerCAmelCase : Union[str, Any] = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(_lowerCamelCase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
_lowerCAmelCase : int = 0
_lowerCAmelCase : Optional[int] = prepped_outputs
for _ in range(_lowerCamelCase ):
# Chunk the input
if not low_mem:
_lowerCAmelCase : Dict = _select_chunk
else:
_lowerCAmelCase : Any = partial(
_chunk_slice , flat_start=_lowerCamelCase , flat_end=min(_lowerCamelCase , i + chunk_size ) , no_batch_dims=len(_lowerCamelCase ) , )
_lowerCAmelCase : Dict[str, Any] = tensor_tree_map(_lowerCamelCase , _lowerCamelCase )
# Run the layer on the chunk
_lowerCAmelCase : Union[str, Any] = layer(**_lowerCamelCase )
# Allocate space for the output
if out is None:
_lowerCAmelCase : str = tensor_tree_map(lambda t : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , _lowerCamelCase )
# Put the chunk in its pre-allocated space
if isinstance(_lowerCamelCase , dict ):
def assign(_lowerCamelCase , _lowerCamelCase ) -> None:
for k, v in da.items():
if isinstance(_lowerCamelCase , dict ):
assign(_lowerCamelCase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
_lowerCAmelCase : Any = da[k]
assign(_lowerCamelCase , _lowerCamelCase )
elif isinstance(_lowerCamelCase , tuple ):
for xa, xa in zip(_lowerCamelCase , _lowerCamelCase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
_lowerCAmelCase : Dict = xa
elif isinstance(_lowerCamelCase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
_lowerCAmelCase : List[Any] = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
_lowerCAmelCase : Dict = tensor_tree_map(lambda t : t.view(orig_batch_dims + t.shape[1:] ) , _lowerCamelCase )
return out
class UpperCAmelCase_ :
def __init__( self, __a = 512, ):
'''simple docstring'''
_lowerCAmelCase : List[str] = max_chunk_size
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Optional[tuple] = None
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
logging.info("Tuning chunk size...")
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
_lowerCAmelCase : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size, 2)) + 1)]
_lowerCAmelCase : Dict = [c for c in candidates if c > min_chunk_size]
_lowerCAmelCase : Optional[int] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__a) -> bool:
try:
with torch.no_grad():
fn(*__a, chunk_size=__a)
return True
except RuntimeError:
return False
_lowerCAmelCase : Any = 0
_lowerCAmelCase : Tuple = len(__a) - 1
while i > min_viable_chunk_size_index:
_lowerCAmelCase : int = test_chunk_size(candidates[i])
if not viable:
_lowerCAmelCase : Tuple = (min_viable_chunk_size_index + i) // 2
else:
_lowerCAmelCase : Optional[Any] = i
_lowerCAmelCase : Optional[Any] = (i + len(__a) - 1) // 2
return candidates[min_viable_chunk_size_index]
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = True
for aa, aa in zip(__a, __a):
assert type(__a) == type(__a)
if isinstance(__a, (list, tuple)):
consistent &= self._compare_arg_caches(__a, __a)
elif isinstance(__a, dict):
_lowerCAmelCase : Any = [v for _, v in sorted(aa.items(), key=lambda x: x[0])]
_lowerCAmelCase : Optional[Any] = [v for _, v in sorted(aa.items(), key=lambda x: x[0])]
consistent &= self._compare_arg_caches(__a, __a)
else:
consistent &= aa == aa
return consistent
def snake_case__ ( self, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : tuple = tree_map(lambda a: a.shape if isinstance(a, torch.Tensor) else a, __a, __a)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(__a)
_lowerCAmelCase : Dict = self._compare_arg_caches(self.cached_arg_data, __a)
else:
# Otherwise, we can reuse the precomputed value
_lowerCAmelCase : Dict = False
if not consistent:
_lowerCAmelCase : Union[str, Any] = self._determine_favorable_chunk_size(
__a, __a, __a, )
_lowerCAmelCase : Optional[Any] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
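# Standalone sketch of the chunking idea implemented above (illustrative and
# independent of this file's helpers): apply a function to fixed-size slices of the
# batch dimension and concatenate, bounding peak activation memory.
if __name__ == "__main__":
    import torch

    def chunked_apply(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
        # Process `x` in `chunk_size` pieces along dim 0 and stitch the results back.
        return torch.cat([fn(x[i : i + chunk_size]) for i in range(0, x.shape[0], chunk_size)])

    x = torch.randn(10, 4)
    assert torch.allclose(chunked_apply(torch.tanh, x, chunk_size=3), torch.tanh(x))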
| 658 |
import baseaa
def A ( _lowerCamelCase ):
'''Encode a UTF-8 string with the Ascii85 codec.'''
return baseaa.aaaencode(_lowerCamelCase.encode("utf-8" ) )
def A ( _lowerCamelCase ):
'''Decode Ascii85-encoded bytes back to a UTF-8 string.'''
return baseaa.aaadecode(_lowerCamelCase ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
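# Round-trip sketch with the standard library (illustrative; assumes the
# `baseaa.aaaencode`/`aaadecode` pair above corresponds to `base64.a85encode`/`a85decode`):
#     import base64
#     payload = base64.a85encode("Hello World!".encode("utf-8"))
#     assert base64.a85decode(payload).decode("utf-8") == "Hello World!"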
| 658 | 1 |
import heapq as hq
import math
from collections.abc import Iterator
class UpperCAmelCase_ :
def __init__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = str(id_)
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Tuple = {} # {vertex:distance}
def __lt__( self, __a):
'''simple docstring'''
return self.key < other.key
def __repr__( self):
'''simple docstring'''
return self.id
def snake_case__ ( self, __a):
'''simple docstring'''
self.neighbors.append(__a)
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = weight
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , _lowerCamelCase )
graph[b - 1].add_edge(graph[a - 1] , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''Prim's algorithm with a linear search over the frontier; returns the MST as a list of edges.'''
_lowerCAmelCase : Union[str, Any] = []
for u in graph:
_lowerCAmelCase : List[Any] = math.inf
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : str = 0
_lowerCAmelCase : Any = graph[:]
while q:
_lowerCAmelCase : Any = min(q )
q.remove(u )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
_lowerCAmelCase : Union[str, Any] = u
_lowerCAmelCase : str = u.edges[v.id]
for i in range(1 , len(_lowerCamelCase ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def A ( _lowerCamelCase , _lowerCamelCase ):
'''Prim's algorithm using a binary heap; yields the MST edges one by one.'''
for u in graph:
_lowerCAmelCase : str = math.inf
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Any = 0
_lowerCAmelCase : Any = list(_lowerCamelCase )
hq.heapify(_lowerCamelCase )
while h:
_lowerCAmelCase : List[Any] = hq.heappop(_lowerCamelCase )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
_lowerCAmelCase : str = u
_lowerCAmelCase : Dict = u.edges[v.id]
hq.heapify(_lowerCamelCase )
for i in range(1 , len(_lowerCamelCase ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def A ( ):
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
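# Standalone reference sketch of Prim's algorithm on a plain adjacency map
# (illustrative; independent of the Vertex-based implementation above):
#     import heapq
#
#     def prim_mst_weight(adj: dict) -> int:
#         start = next(iter(adj))
#         seen, heap, total = set(), [(0, start)], 0
#         while heap:
#             weight, u = heapq.heappop(heap)
#             if u in seen:
#                 continue
#             seen.add(u)
#             total += weight
#             for v, w in adj[u]:
#                 if v not in seen:
#                     heapq.heappush(heap, (w, v))
#         return total
#
#     adj = {"a": [("b", 1), ("c", 4)], "b": [("a", 1), ("c", 2)], "c": [("a", 4), ("b", 2)]}
#     assert prim_mst_weight(adj) == 3  # edges a-b (1) and b-c (2)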
| 658 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : str = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : List[str] = use_shared_relative_position_bias
_lowerCAmelCase : List[str] = layer_scale_init_value
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[int] = use_auxiliary_head
_lowerCAmelCase : Optional[Any] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : int = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
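# Usage note (illustrative; assumes this mirrors `Data2VecVisionConfig` in upstream
# `transformers`, whose defaults match the signature above):
#     from transformers import Data2VecVisionConfig
#     cfg = Data2VecVisionConfig()
#     (cfg.image_size, cfg.patch_size, cfg.num_channels)  # -> (224, 16, 3)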
| 658 | 1 |
import requests
from bsa import BeautifulSoup
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , params=_lowerCamelCase ).content , "html.parser" )
_lowerCAmelCase : Any = soup.find("div" , attrs={"class": "gs_ri"} )
_lowerCAmelCase : str = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_snake_case = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''Instantiate the right XLNet head for the task, load the TF checkpoint, and save model plus config.'''
_lowerCAmelCase : Optional[Any] = XLNetConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : Any = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase : Any = finetuning_task
_lowerCAmelCase : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase : Union[str, Any] = XLNetForSequenceClassification(_lowerCamelCase )
elif "squad" in finetuning_task:
_lowerCAmelCase : Union[str, Any] = finetuning_task
_lowerCAmelCase : Any = XLNetForQuestionAnswering(_lowerCamelCase )
else:
_lowerCAmelCase : Union[str, Any] = XLNetLMHeadModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(F"Save PyTorch model to {os.path.abspath(_lowerCamelCase )}" )
torch.save(model.state_dict() , _lowerCamelCase )
print(F"Save configuration file to {os.path.abspath(_lowerCamelCase )}" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
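# Example invocation (illustrative only — paths and the task name are hypothetical):
#   python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./xlnet_model.ckpt \
#       --xlnet_config_file ./xlnet_config.json \
#       --pytorch_dump_folder_path ./xlnet-pytorch \
#       --finetuning_task sts-b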
| 658 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class UpperCAmelCase_ :
lowerCamelCase__ = LEDConfig
lowerCamelCase__ = {}
lowerCamelCase__ = 'gelu'
def __init__( self, __a, __a=13, __a=7, __a=True, __a=False, __a=99, __a=32, __a=2, __a=4, __a=37, __a=0.1, __a=0.1, __a=20, __a=2, __a=1, __a=0, __a=4, ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Union[str, Any] = batch_size
_lowerCAmelCase : Optional[Any] = seq_length
_lowerCAmelCase : Optional[int] = is_training
_lowerCAmelCase : str = use_labels
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : str = num_attention_heads
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : List[str] = eos_token_id
_lowerCAmelCase : Optional[int] = pad_token_id
_lowerCAmelCase : str = bos_token_id
_lowerCAmelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_lowerCAmelCase : Dict = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_lowerCAmelCase : Optional[int] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
_lowerCAmelCase : Dict = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
_lowerCAmelCase : Optional[int] = tf.concat([input_ids, eos_tensor], axis=1)
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowerCAmelCase : Optional[int] = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates, )
_lowerCAmelCase : Optional[int] = prepare_led_inputs_dict(__a, __a, __a)
_lowerCAmelCase : List[str] = tf.concat(
[tf.zeros_like(__a)[:, :-1], tf.ones_like(__a)[:, -1:]], axis=-1, )
_lowerCAmelCase : str = global_attention_mask
return config, inputs_dict
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = TFLEDModel(config=__a).get_decoder()
_lowerCAmelCase : int = inputs_dict["input_ids"]
_lowerCAmelCase : Tuple = input_ids[:1, :]
_lowerCAmelCase : Union[str, Any] = inputs_dict["attention_mask"][:1, :]
_lowerCAmelCase : Any = 1
# first forward pass
_lowerCAmelCase : Tuple = model(__a, attention_mask=__a, use_cache=__a)
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_lowerCAmelCase : List[str] = ids_tensor((self.batch_size, 3), config.vocab_size)
_lowerCAmelCase : Tuple = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.inta)
# append to next input_ids and
_lowerCAmelCase : Dict = tf.concat([input_ids, next_tokens], axis=-1)
_lowerCAmelCase : str = tf.concat([attention_mask, next_attn_mask], axis=-1)
_lowerCAmelCase : str = model(__a, attention_mask=__a)[0]
_lowerCAmelCase : Optional[int] = model(__a, attention_mask=__a, past_key_values=__a)[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
# select random slice
_lowerCAmelCase : Dict = int(ids_tensor((1,), output_from_past.shape[-1]))
_lowerCAmelCase : str = output_from_no_past[:, -3:, random_slice_idx]
_lowerCAmelCase : Any = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a, __a, rtol=1E-3)
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , _lowerCamelCase=None , ):
'''simple docstring'''
if attention_mask is None:
_lowerCAmelCase : Dict = tf.cast(tf.math.not_equal(_lowerCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_lowerCAmelCase : Optional[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_lowerCAmelCase : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowerCAmelCase : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
lowerCamelCase__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
lowerCamelCase__ = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCamelCase__ = True
lowerCamelCase__ = False
lowerCamelCase__ = False
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFLEDModelTester(self)
_lowerCAmelCase : Any = ConfigTester(self, config_class=__a)
def snake_case__ ( self):
'''simple docstring'''
self.config_tester.run_common_tests()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : Dict = tf.zeros_like(inputs_dict["attention_mask"])
_lowerCAmelCase : Tuple = 2
_lowerCAmelCase : Tuple = tf.where(
tf.range(self.model_tester.seq_length)[None, :] < num_global_attn_indices, 1, inputs_dict["global_attention_mask"], )
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Any = self.model_tester.seq_length
_lowerCAmelCase : Any = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a):
_lowerCAmelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
def check_encoder_attentions_output(__a):
_lowerCAmelCase : List[str] = [t.numpy() for t in outputs.encoder_attentions]
_lowerCAmelCase : int = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a), self.model_tester.num_hidden_layers)
self.assertEqual(len(__a), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_length, seq_length], )
self.assertListEqual(
list(global_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices], )
for model_class in self.all_model_classes:
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : int = False
_lowerCAmelCase : Dict = False
_lowerCAmelCase : Optional[int] = model_class(__a)
_lowerCAmelCase : Dict = model(self._prepare_for_class(__a, __a))
_lowerCAmelCase : str = len(__a)
self.assertEqual(config.output_hidden_states, __a)
check_encoder_attentions_output(__a)
if self.is_encoder_decoder:
_lowerCAmelCase : Optional[Any] = model_class(__a)
_lowerCAmelCase : Union[str, Any] = model(self._prepare_for_class(__a, __a))
self.assertEqual(config.output_hidden_states, __a)
check_decoder_attentions_output(__a)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_lowerCAmelCase : Any = True
_lowerCAmelCase : int = model_class(__a)
_lowerCAmelCase : Optional[int] = model(self._prepare_for_class(__a, __a))
self.assertEqual(config.output_hidden_states, __a)
check_encoder_attentions_output(__a)
# Check attention is always last and order is fine
_lowerCAmelCase : int = True
_lowerCAmelCase : Any = True
_lowerCAmelCase : Dict = model_class(__a)
_lowerCAmelCase : Optional[int] = model(self._prepare_for_class(__a, __a))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(__a))
self.assertEqual(model.config.output_hidden_states, __a)
check_encoder_attentions_output(__a)
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing.")
def snake_case__ ( self):
'''simple docstring'''
pass
def snake_case__ ( self):
'''simple docstring'''
pass
def A ( _lowerCamelCase ):
'''simple docstring'''
return tf.constant(_lowerCamelCase , dtype=tf.intaa )
_snake_case = 1e-4
@slow
@require_tf
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led
# change to intended input here
_lowerCAmelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
_lowerCAmelCase : Optional[int] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
_lowerCAmelCase : Dict = prepare_led_inputs_dict(model.config, __a, __a)
_lowerCAmelCase : Optional[Any] = model(**__a)[0]
_lowerCAmelCase : Any = (1, 1024, 768)
self.assertEqual(output.shape, __a)
# change to expected output here
_lowerCAmelCase : str = tf.convert_to_tensor(
[[2.3_050, 2.8_279, 0.6_531], [-1.8_457, -0.1_455, -3.5_661], [-1.0_186, 0.4_586, -2.2_043]], )
tf.debugging.assert_near(output[:, :3, :3], __a, atol=1E-3)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
# change to intended input here
_lowerCAmelCase : List[str] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
_lowerCAmelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]])
_lowerCAmelCase : Optional[int] = prepare_led_inputs_dict(model.config, __a, __a)
_lowerCAmelCase : Optional[Any] = model(**__a)[0]
_lowerCAmelCase : Tuple = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape, __a)
# change to expected output here
_lowerCAmelCase : Any = tf.convert_to_tensor(
[[33.6_507, 6.4_572, 16.8_089], [5.8_739, -2.4_238, 11.2_902], [-3.2_139, -4.3_149, 4.2_783]], )
tf.debugging.assert_near(output[:, :3, :3], __a, atol=1E-3, rtol=1E-3)
| 658 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a=4, __a=False):
'''simple docstring'''
_lowerCAmelCase : List[str] = compute_bleu(
reference_corpus=__a, translation_corpus=__a, max_order=__a, smooth=__a)
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
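# Standalone usage sketch (mirrors the example already embedded in
# _KWARGS_DESCRIPTION above; `load_metric` is the older `datasets` entry point):
#     import datasets
#     bleu = datasets.load_metric("bleu")
#     result = bleu.compute(
#         predictions=[["hello", "there", "general", "kenobi"]],
#         references=[[["hello", "there", "general", "kenobi"]]],
#     )
#     result["bleu"]  # -> 1.0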
| 658 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class UpperCAmelCase_ ( enum.Enum):
lowerCamelCase__ = 0
lowerCamelCase__ = 1
lowerCamelCase__ = 2
@add_end_docstrings(a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self, *__a, **__a):
'''simple docstring'''
super().__init__(*__a, **__a)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_lowerCAmelCase : Tuple = None
if self.model.config.prefix is not None:
_lowerCAmelCase : Union[str, Any] = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_lowerCAmelCase : Any = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=__a, **self._forward_params)
_lowerCAmelCase : str = {**self._preprocess_params, **preprocess_params}
_lowerCAmelCase : str = {**self._forward_params, **forward_params}
def snake_case__ ( self, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, __a=None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = {}
if prefix is not None:
_lowerCAmelCase : str = prefix
if prefix:
_lowerCAmelCase : Dict = self.tokenizer(
__a, padding=__a, add_special_tokens=__a, return_tensors=self.framework)
_lowerCAmelCase : Any = prefix_inputs["input_ids"].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
" [None, 'hole']")
_lowerCAmelCase : int = handle_long_generation
preprocess_params.update(__a)
_lowerCAmelCase : List[str] = generate_kwargs
_lowerCAmelCase : Dict = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
if return_tensors is not None:
raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
_lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
_lowerCAmelCase : Any = ReturnType.TENSORS
if return_type is not None:
_lowerCAmelCase : Tuple = return_type
if clean_up_tokenization_spaces is not None:
_lowerCAmelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
_lowerCAmelCase : str = self.tokenizer.encode(__a, add_special_tokens=__a)
if len(__a) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim.")
_lowerCAmelCase : str = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({"add_space_before_punct_symbol": True})
return super()._parse_and_tokenize(*__a, **__a)
def __call__( self, __a, **__a):
'''simple docstring'''
return super().__call__(__a, **__a)
def snake_case__ ( self, __a, __a="", __a=None, **__a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.tokenizer(
prefix + prompt_text, padding=__a, add_special_tokens=__a, return_tensors=self.framework)
_lowerCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
_lowerCAmelCase : List[Any] = inputs["input_ids"].shape[-1]
if "max_new_tokens" in generate_kwargs:
_lowerCAmelCase : Tuple = generate_kwargs["max_new_tokens"]
else:
_lowerCAmelCase : int = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError("We cannot infer how many new tokens are expected")
if cur_len + new_tokens > self.tokenizer.model_max_length:
_lowerCAmelCase : str = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
"We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
" models max length")
_lowerCAmelCase : Dict = inputs["input_ids"][:, -keep_length:]
if "attention_mask" in inputs:
_lowerCAmelCase : Optional[Any] = inputs["attention_mask"][:, -keep_length:]
return inputs
def snake_case__ ( self, __a, **__a):
'''simple docstring'''
_lowerCAmelCase : Tuple = model_inputs["input_ids"]
_lowerCAmelCase : Dict = model_inputs.get("attention_mask", __a)
# Allow empty prompts
if input_ids.shape[1] == 0:
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Union[str, Any] = 1
else:
_lowerCAmelCase : str = input_ids.shape[0]
_lowerCAmelCase : List[str] = model_inputs.pop("prompt_text")
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_lowerCAmelCase : str = generate_kwargs.pop("prefix_length", 0)
if prefix_length > 0:
_lowerCAmelCase : List[str] = "max_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].max_new_tokens is not None
)
if not has_max_new_tokens:
_lowerCAmelCase : Optional[int] = generate_kwargs.get("max_length") or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_lowerCAmelCase : Dict = "min_new_tokens" in generate_kwargs or (
"generation_config" in generate_kwargs
and generate_kwargs["generation_config"].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_lowerCAmelCase : Optional[Any] = self.model.generate(input_ids=__a, attention_mask=__a, **__a)
_lowerCAmelCase : Tuple = generated_sequence.shape[0]
if self.framework == "pt":
_lowerCAmelCase : Dict = generated_sequence.reshape(__a, out_b // in_b, *generated_sequence.shape[1:])
elif self.framework == "tf":
_lowerCAmelCase : Union[str, Any] = tf.reshape(__a, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def snake_case__ ( self, __a, __a=ReturnType.FULL_TEXT, __a=True):
'''simple docstring'''
_lowerCAmelCase : int = model_outputs["generated_sequence"][0]
_lowerCAmelCase : Union[str, Any] = model_outputs["input_ids"]
_lowerCAmelCase : int = model_outputs["prompt_text"]
_lowerCAmelCase : Tuple = generated_sequence.numpy().tolist()
_lowerCAmelCase : int = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_lowerCAmelCase : int = {"generated_token_ids": sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_lowerCAmelCase : Optional[int] = self.tokenizer.decode(
__a, skip_special_tokens=__a, clean_up_tokenization_spaces=__a, )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_lowerCAmelCase : int = 0
else:
_lowerCAmelCase : Optional[Any] = len(
self.tokenizer.decode(
input_ids[0], skip_special_tokens=__a, clean_up_tokenization_spaces=__a, ))
if return_type == ReturnType.FULL_TEXT:
_lowerCAmelCase : List[str] = prompt_text + text[prompt_length:]
else:
_lowerCAmelCase : List[Any] = text[prompt_length:]
_lowerCAmelCase : List[Any] = {"generated_text": all_text}
records.append(__a)
return records
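# Standalone usage sketch via the public `transformers` pipeline API (illustrative;
# the model name is just an example, independent of the internals above):
#     from transformers import pipeline
#     generator = pipeline("text-generation", model="gpt2")
#     generator("Hello, I'm a language model,", max_new_tokens=10)[0]["generated_text"]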
| 658 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''Load an OmegaConf config from disk, optionally pretty-printing it.'''
_lowerCAmelCase : Dict = OmegaConf.load(_lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) )
return config
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if conf_path is None:
_lowerCAmelCase : Union[str, Any] = "./model_checkpoints/vqgan_only.yaml"
_lowerCAmelCase : Tuple = load_config(_lowerCamelCase , display=_lowerCamelCase )
_lowerCAmelCase : str = VQModel(**config.model.params )
if ckpt_path is None:
_lowerCAmelCase : Optional[int] = "./model_checkpoints/vqgan_only.pt"
_lowerCAmelCase : int = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
if ".ckpt" in ckpt_path:
_lowerCAmelCase : List[Any] = sd["state_dict"]
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
model.to(_lowerCamelCase )
del sd
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = model.encode(_lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
_lowerCAmelCase : int = model.decode(_lowerCamelCase )
return xrec
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = string.rsplit("." , 1 )
if reload:
_lowerCAmelCase : Dict = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def A ( _lowerCamelCase ):
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : str = instantiate_from_config(_lowerCamelCase )
if sd is not None:
model.load_state_dict(_lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if ckpt:
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" )
_lowerCAmelCase : int = pl_sd["global_step"]
print(F"loaded model from global step {global_step}." )
else:
_lowerCAmelCase : Optional[int] = {"state_dict": None}
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"]
return model, global_step
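if __name__ == "__main__":
    # Standalone sketch of the `target`-string instantiation pattern used by
    # `instantiate_from_config` above (illustrative; standard library only):
    def _get_obj_from_str(path: str):
        module_name, cls_name = path.rsplit(".", 1)
        return getattr(importlib.import_module(module_name), cls_name)

    config = {"target": "collections.OrderedDict", "params": {}}
    obj = _get_obj_from_str(config["target"])(**config.get("params", {}))
    print(type(obj).__name__)  # -> OrderedDict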
| 658 | 1 |
from __future__ import annotations
import os
from typing import Any
import requests
_snake_case = "https://api.github.com"
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
_snake_case = BASE_URL + "/user"
# https://github.com/settings/tokens
_snake_case = os.environ.get("USER_TOKEN", "")
def A ( _lowerCamelCase ):
'''Fetch the authenticated user's GitHub profile as JSON.'''
_lowerCAmelCase : Optional[int] = {
"Authorization": F"token {auth_token}",
"Accept": "application/vnd.github.v3+json",
}
return requests.get(_lowerCamelCase , headers=_lowerCamelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'''{key}: {value}''')
else:
raise ValueError("'USER_TOKEN' field cannot be empty.")
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'roc_bert'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=0, __a="absolute", __a=None, __a=True, __a=True, __a=768, __a=910, __a=512, __a=2_4858, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=__a, **__a)
| 658 | 1 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("repo_id" , ["canonical_dataset_name", "org-name/dataset-name"] )
@pytest.mark.parametrize("path" , ["filename.csv", "filename with blanks.csv"] )
@pytest.mark.parametrize("revision" , [None, "v2"] )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''`hf_hub_url` should build the canonical dataset resolve URL for any repo/path/revision.'''
_lowerCAmelCase : int = hf_hub_url(repo_id=_lowerCamelCase , path=_lowerCamelCase , revision=_lowerCamelCase )
assert url == F"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(_lowerCamelCase )}"
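# Spelled-out expectation (illustrative): blanks in the path are percent-encoded and
# a missing revision falls back to "main", e.g.
#     from urllib.parse import quote
#     quote("filename with blanks.csv")  # -> "filename%20with%20blanks.csv"
#     # https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv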
| 658 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'roc_bert'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=0, __a="absolute", __a=None, __a=True, __a=True, __a=768, __a=910, __a=512, __a=2_4858, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=__a, **__a)
| 658 |
def A ( _lowerCamelCase ):
'''simple docstring'''
if length <= 0 or not isinstance(_lowerCamelCase , _lowerCamelCase ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
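# Expected output of the two calls above (h(n) = n * (2n - 1) for n = 0..length-1):
#   [0, 1, 6, 15, 28]
#   [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]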
| 658 | 1 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase_ ( a , a , a):
lowerCamelCase__ = [r'h\.\d+\.attn\.bias', r'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self, __a, __a, __a = None, __a = 5_0257, __a = 1024, __a = 768, __a = 12, __a = 12, __a = None, __a = "gelu_new", __a = 0.1, __a = 0.1, __a = 0.1, __a = 1E-5, __a = 0.02, __a = True, __a = True, __a = False, __a = False, ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Tuple = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal.")
_lowerCAmelCase : int = prefix_inner_dim
_lowerCAmelCase : List[str] = prefix_hidden_dim
_lowerCAmelCase : List[Any] = (
nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
if self.prefix_hidden_dim is not None
else nn.Identity()
)
_lowerCAmelCase : Union[str, Any] = (
nn.Linear(self.prefix_hidden_dim, __a) if self.prefix_hidden_dim is not None else nn.Identity()
)
_lowerCAmelCase : Union[str, Any] = GPTaConfig(
vocab_size=__a, n_positions=__a, n_embd=__a, n_layer=__a, n_head=__a, n_inner=__a, activation_function=__a, resid_pdrop=__a, embd_pdrop=__a, attn_pdrop=__a, layer_norm_epsilon=__a, initializer_range=__a, scale_attn_weights=__a, use_cache=__a, scale_attn_by_inverse_layer_idx=__a, reorder_and_upcast_attn=__a, )
_lowerCAmelCase : List[str] = GPTaLMHeadModel(__a)
def snake_case__ ( self, __a, __a, __a = None, __a = None, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.transformer.transformer.wte(__a)
_lowerCAmelCase : Any = self.encode_prefix(__a)
_lowerCAmelCase : List[Any] = self.decode_prefix(__a)
_lowerCAmelCase : int = torch.cat((prefix_embeds, embedding_text), dim=1)
if labels is not None:
_lowerCAmelCase : Optional[int] = self.get_dummy_token(input_ids.shape[0], input_ids.device)
_lowerCAmelCase : Union[str, Any] = torch.cat((dummy_token, input_ids), dim=1)
_lowerCAmelCase : Any = self.transformer(inputs_embeds=__a, labels=__a, attention_mask=__a)
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def snake_case__ ( self, __a, __a):
'''simple docstring'''
return torch.zeros(__a, self.prefix_length, dtype=torch.intaa, device=__a)
def snake_case__ ( self, __a):
'''simple docstring'''
return self.encode_prefix(__a)
@torch.no_grad()
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : int = torch.split(__a, 1, dim=0)
_lowerCAmelCase : int = []
_lowerCAmelCase : List[str] = []
for feature in features:
_lowerCAmelCase : Dict = self.decode_prefix(feature.to(__a)) # back to the clip feature
# Only support beam search for now
_lowerCAmelCase , _lowerCAmelCase : List[str] = self.generate_beam(
input_embeds=__a, device=__a, eos_token_id=__a)
generated_tokens.append(output_tokens[0])
generated_seq_lengths.append(seq_lengths[0])
_lowerCAmelCase : List[Any] = torch.stack(__a)
_lowerCAmelCase : Any = torch.stack(__a)
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def snake_case__ ( self, __a=None, __a=None, __a=None, __a = 5, __a = 67, __a = 1.0, __a = None, ):
'''simple docstring'''
_lowerCAmelCase : Tuple = eos_token_id
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Union[str, Any] = torch.ones(__a, device=__a, dtype=torch.int)
_lowerCAmelCase : Dict = torch.zeros(__a, device=__a, dtype=torch.bool)
if input_embeds is not None:
_lowerCAmelCase : int = input_embeds
else:
_lowerCAmelCase : Any = self.transformer.transformer.wte(__a)
for i in range(__a):
_lowerCAmelCase : List[Any] = self.transformer(inputs_embeds=__a)
_lowerCAmelCase : Dict = outputs.logits
_lowerCAmelCase : Optional[Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
_lowerCAmelCase : int = logits.softmax(-1).log()
if scores is None:
_lowerCAmelCase , _lowerCAmelCase : int = logits.topk(__a, -1)
_lowerCAmelCase : List[Any] = generated.expand(__a, *generated.shape[1:])
_lowerCAmelCase , _lowerCAmelCase : List[str] = next_tokens.permute(1, 0), scores.squeeze(0)
if tokens is None:
_lowerCAmelCase : Union[str, Any] = next_tokens
else:
_lowerCAmelCase : int = tokens.expand(__a, *tokens.shape[1:])
_lowerCAmelCase : Union[str, Any] = torch.cat((tokens, next_tokens), dim=1)
else:
_lowerCAmelCase : str = -float(np.inf)
_lowerCAmelCase : Optional[Any] = 0
_lowerCAmelCase : int = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
_lowerCAmelCase : Any = scores_sum / seq_lengths[:, None]
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = scores_sum_average.view(-1).topk(__a, -1)
_lowerCAmelCase : Optional[Any] = next_tokens // scores_sum.shape[1]
_lowerCAmelCase : Tuple = seq_lengths[next_tokens_source]
_lowerCAmelCase : List[Any] = next_tokens % scores_sum.shape[1]
_lowerCAmelCase : List[Any] = next_tokens.unsqueeze(1)
_lowerCAmelCase : Any = tokens[next_tokens_source]
_lowerCAmelCase : str = torch.cat((tokens, next_tokens), dim=1)
_lowerCAmelCase : List[Any] = generated[next_tokens_source]
_lowerCAmelCase : Dict = scores_sum_average * seq_lengths
_lowerCAmelCase : Any = is_stopped[next_tokens_source]
_lowerCAmelCase : int = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
_lowerCAmelCase : Dict = torch.cat((generated, next_token_embed), dim=1)
_lowerCAmelCase : List[str] = is_stopped + next_tokens.eq(__a).squeeze()
if is_stopped.all():
break
_lowerCAmelCase : Union[str, Any] = scores / seq_lengths
_lowerCAmelCase : Union[str, Any] = scores.argsort(descending=__a)
# tokens tensors are already padded to max_seq_length
_lowerCAmelCase : Optional[int] = [tokens[i] for i in order]
_lowerCAmelCase : Dict = torch.stack(__a, dim=0)
_lowerCAmelCase : Optional[int] = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
return output_texts, seq_lengths
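# Self-contained sketch of the prefix mechanism implemented above: projected
# prefix embeddings are concatenated in front of the token embeddings before the
# sequence is fed to GPT-2. All dimensions below are illustrative assumptions.
#   import torch
#   from torch import nn
#   prefix = torch.randn(2, 77, 768)           # (batch, prefix_length, prefix_inner_dim)
#   decode_prefix = nn.Linear(768, 768)        # stands in for `self.decode_prefix`
#   wte = nn.Embedding(50257, 768)             # stands in for the GPT-2 token embedding
#   tokens = torch.randint(0, 50257, (2, 10))  # dummy caption token ids
#   hidden = torch.cat((decode_prefix(prefix), wte(tokens)), dim=1)
#   print(hidden.shape)                        # torch.Size([2, 87, 768])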
| 658 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( _lowerCamelCase ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = features[:, labels == i]
_lowerCAmelCase : Dict = data.mean(1 )
# Centralize the data of class i
_lowerCAmelCase : Union[str, Any] = data - column_reshape(_lowerCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : int = np.dot(_lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = features.mean(1 )
_lowerCAmelCase : List[str] = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = features[:, labels == i]
_lowerCAmelCase : Optional[Any] = data.shape[1]
_lowerCAmelCase : Optional[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if features.any():
_lowerCAmelCase : List[Any] = features.mean(1 )
# Center the dataset
_lowerCAmelCase : List[Any] = features - np.reshape(_lowerCamelCase , (data_mean.size, 1) )
_lowerCAmelCase : Optional[Any] = np.dot(_lowerCamelCase , centered_data.T ) / features.shape[1]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = np.linalg.eigh(_lowerCamelCase )
        # Take all the columns in reverse order (-1), then keep only the first `dimensions` columns
_lowerCAmelCase : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCAmelCase : List[Any] = np.dot(filtered_eigenvectors.T , _lowerCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
_lowerCAmelCase , _lowerCAmelCase : List[str] = eigh(
covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
_lowerCAmelCase : List[str] = eigenvectors[:, ::-1][:, :dimensions]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = np.linalg.svd(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = svd_matrix[:, 0:dimensions]
_lowerCAmelCase : str = np.dot(filtered_svd_matrix.T , _lowerCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCAmelCase : List[Any] = np.array([0, 0, 0, 1, 1] )
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Union[str, Any] = linear_discriminant_analysis(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : List[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Tuple = principal_component_analysis(_lowerCamelCase , _lowerCamelCase )
if not np.allclose(_lowerCamelCase , _lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
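# Worked example for the PCA routine tested above: a 3-feature, 5-sample matrix
# projected onto 2 components yields a (2, 5) array (component rows, sample columns):
#   demo = np.array([[1.0, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
#   principal_component_analysis(demo, 2).shape == (2, 5)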
| 658 | 1 |
def A ( _lowerCamelCase ):
'''simple docstring'''
if number > 0:
raise ValueError("input must be a negative integer" )
_lowerCAmelCase : int = len(bin(_lowerCamelCase )[3:] )
_lowerCAmelCase : Any = bin(abs(_lowerCamelCase ) - (1 << binary_number_length) )[3:]
_lowerCAmelCase : Optional[Any] = (
(
"1"
+ "0" * (binary_number_length - len(_lowerCamelCase ))
+ twos_complement_number
)
if number < 0
else "0"
)
return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
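# Worked example for -5: abs(-5) = 0b101 needs 3 bits; 5 - 2**3 = -3 -> "11",
# padded behind the leading sign bit to "1011", so the function returns "0b1011".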
| 658 |
import requests
from bs4 import BeautifulSoup  # bs4 is the actual BeautifulSoup package name
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , params=_lowerCamelCase ).content , "html.parser" )
_lowerCAmelCase : Any = soup.find("div" , attrs={"class": "gs_ri"} )
_lowerCAmelCase : str = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_snake_case = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a = None, __a = None, __a = None, __a = False, __a = False, __a = None, __a = None, **__a, ):
'''simple docstring'''
super().__init__(
__a, split=__a, features=__a, cache_dir=__a, keep_in_memory=__a, streaming=__a, num_proc=__a, **__a, )
_lowerCAmelCase : List[Any] = field
_lowerCAmelCase : Tuple = path_or_paths if isinstance(__a, __a) else {self.split: path_or_paths}
_lowerCAmelCase : str = Json(
cache_dir=__a, data_files=__a, features=__a, field=__a, **__a, )
def snake_case__ ( self):
'''simple docstring'''
if self.streaming:
_lowerCAmelCase : List[Any] = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : str = None
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Union[str, Any] = None
self.builder.download_and_prepare(
download_config=__a, download_mode=__a, verification_mode=__a, base_path=__a, num_proc=self.num_proc, )
_lowerCAmelCase : Optional[Any] = self.builder.as_dataset(
split=self.split, verification_mode=__a, in_memory=self.keep_in_memory)
return dataset
class UpperCAmelCase_ :
def __init__( self, __a, __a, __a = None, __a = None, **__a, ):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"num_proc {num_proc} must be an integer > 0.")
_lowerCAmelCase : int = dataset
_lowerCAmelCase : Dict = path_or_buf
_lowerCAmelCase : Any = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_lowerCAmelCase : int = num_proc
_lowerCAmelCase : Dict = "utf-8"
_lowerCAmelCase : List[str] = to_json_kwargs
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.to_json_kwargs.pop("path_or_buf", __a)
_lowerCAmelCase : List[Any] = self.to_json_kwargs.pop("orient", "records")
_lowerCAmelCase : int = self.to_json_kwargs.pop("lines", True if orient == "records" else False)
_lowerCAmelCase : List[Any] = self.to_json_kwargs.pop("index", False if orient in ["split", "table"] else True)
_lowerCAmelCase : Dict = self.to_json_kwargs.pop("compression", __a)
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"`datasets` currently does not support {compression} compression")
if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
with fsspec.open(self.path_or_buf, "wb", compression=__a) as buffer:
_lowerCAmelCase : Dict = self._write(file_obj=__a, orient=__a, lines=__a, index=__a, **self.to_json_kwargs)
else:
if compression:
raise NotImplementedError(
f"The compression parameter is not supported when writing to a buffer, but compression={compression}"
" was passed. Please provide a local path instead.")
_lowerCAmelCase : Tuple = self._write(
file_obj=self.path_or_buf, orient=__a, lines=__a, index=__a, **self.to_json_kwargs)
return written
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = args
_lowerCAmelCase : Optional[int] = query_table(
table=self.dataset.data, key=slice(__a, offset + self.batch_size), indices=self.dataset._indices, )
_lowerCAmelCase : str = batch.to_pandas().to_json(
path_or_buf=__a, orient=__a, lines=__a, index=__a, **__a)
if not json_str.endswith("\n"):
json_str += "\n"
return json_str.encode(self.encoding)
def snake_case__ ( self, __a, __a, __a, __a, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0, len(self.dataset), self.batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
_lowerCAmelCase : Any = self._batch_json((offset, orient, lines, index, to_json_kwargs))
written += file_obj.write(__a)
else:
_lowerCAmelCase , _lowerCAmelCase : Tuple = len(self.dataset), self.batch_size
with multiprocessing.Pool(self.num_proc) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, __a, __a)], ), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating json from Arrow format", ):
written += file_obj.write(__a)
return written
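# End-to-end sketch via the public `datasets` API that wraps these classes
# (file name and toy data are illustrative):
#   from datasets import Dataset
#   ds = Dataset.from_dict({"a": [1, 2], "b": ["x", "y"]})
#   ds.to_json("demo.jsonl", lines=True)   # JSON Lines, written by the writer above
#   reloaded = Dataset.from_json("demo.jsonl")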
| 658 |
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[str] = {1: 1}
for inputa in range(2 , _lowerCamelCase ):
_lowerCAmelCase : int = 0
_lowerCAmelCase : Any = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCAmelCase : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCAmelCase : Tuple = counter
if counter > pre_counter:
_lowerCAmelCase : Union[str, Any] = inputa
_lowerCAmelCase : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
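# Reference points: the chain from 13 has 10 terms
# (13 -> 40 -> 20 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1), and for a one-million
# limit the longest chain starts at 837799 (the known Project Euler 14 answer).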
| 658 | 1 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
_snake_case = logging.get_logger(__name__)
@dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys())})
lowerCamelCase__ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'})
lowerCamelCase__ = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowerCamelCase__ = field(
default=a , metadata={'help': 'Overwrite the cached training and evaluation sets'})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.task_name.lower()
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'train'
lowerCamelCase__ = 'dev'
lowerCamelCase__ = 'test'
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
lowerCamelCase__ = 42
def __init__( self, __a, __a, __a = None, __a = Split.train, __a = None, ):
'''simple docstring'''
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py", __a, )
_lowerCAmelCase : Optional[Any] = args
_lowerCAmelCase : Tuple = glue_processors[args.task_name]()
_lowerCAmelCase : Union[str, Any] = glue_output_modes[args.task_name]
if isinstance(__a, __a):
try:
_lowerCAmelCase : Union[str, Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name")
# Load data features from cache or dataset file
_lowerCAmelCase : Optional[Any] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}", )
_lowerCAmelCase : List[Any] = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_lowerCAmelCase , _lowerCAmelCase : Tuple = label_list[2], label_list[1]
_lowerCAmelCase : Dict = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_lowerCAmelCase : List[str] = cached_features_file + ".lock"
with FileLock(__a):
if os.path.exists(__a) and not args.overwrite_cache:
_lowerCAmelCase : Dict = time.time()
_lowerCAmelCase : Union[str, Any] = torch.load(__a)
logger.info(
f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start)
else:
logger.info(f"Creating features from dataset file at {args.data_dir}")
if mode == Split.dev:
_lowerCAmelCase : Tuple = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
_lowerCAmelCase : str = self.processor.get_test_examples(args.data_dir)
else:
_lowerCAmelCase : Optional[Any] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
_lowerCAmelCase : Any = examples[:limit_length]
_lowerCAmelCase : Any = glue_convert_examples_to_features(
__a, __a, max_length=args.max_seq_length, label_list=__a, output_mode=self.output_mode, )
_lowerCAmelCase : int = time.time()
torch.save(self.features, __a)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]")
def __len__( self):
'''simple docstring'''
return len(self.features)
def __getitem__( self, __a):
'''simple docstring'''
return self.features[i]
def snake_case__ ( self):
'''simple docstring'''
return self.label_list
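# Hypothetical usage sketch (the dataclass and dataset above correspond to the
# deprecated GlueDataTrainingArguments / GlueDataset pair in transformers):
#   from transformers import BertTokenizerFast
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   train_dataset = GlueDataset(args, tokenizer=tokenizer, mode="train")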
| 658 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _lowerCamelCase ):
'''simple docstring'''
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
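# A few mappings implied by the rules above (keys are hypothetical examples):
#   "prior.x_out"     -> "prior.fc_proj_out"
#   "prime_state_ln"  -> "encoder.final_layer_norm"
#   "foo.y_emb.bar"   -> "foo.metadata_embedding.bar"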
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
        # handle mismatched shapes
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 | 1 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = BertTokenizer
lowerCamelCase__ = BertTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = True
lowerCamelCase__ = filter_non_english
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : Tuple = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = "UNwant\u00E9d,running"
_lowerCAmelCase : Dict = "unwanted, running"
return input_text, output_text
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.tokenizer_class(self.vocab_file)
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(__a, ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a), [9, 6, 7, 12, 10, 11])
def snake_case__ ( self):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = self.get_rust_tokenizer()
_lowerCAmelCase : Optional[int] = "UNwant\u00E9d,running"
_lowerCAmelCase : List[str] = tokenizer.tokenize(__a)
_lowerCAmelCase : Dict = rust_tokenizer.tokenize(__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : List[str] = tokenizer.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : str = rust_tokenizer.encode(__a, add_special_tokens=__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : Optional[int] = tokenizer.encode(__a)
_lowerCAmelCase : Any = rust_tokenizer.encode(__a)
self.assertListEqual(__a, __a)
# With lower casing
_lowerCAmelCase : Optional[Any] = self.get_tokenizer(do_lower_case=__a)
_lowerCAmelCase : Dict = self.get_rust_tokenizer(do_lower_case=__a)
_lowerCAmelCase : Union[str, Any] = "UNwant\u00E9d,running"
_lowerCAmelCase : Optional[Any] = tokenizer.tokenize(__a)
_lowerCAmelCase : List[str] = rust_tokenizer.tokenize(__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : List[str] = tokenizer.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : List[str] = rust_tokenizer.encode(__a, add_special_tokens=__a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : List[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : List[Any] = tokenizer.encode(__a)
_lowerCAmelCase : Dict = rust_tokenizer.encode(__a)
self.assertListEqual(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = BasicTokenizer(do_lower_case=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = BasicTokenizer(do_lower_case=__a, strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = BasicTokenizer(do_lower_case=__a, strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = BasicTokenizer(do_lower_case=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = BasicTokenizer(do_lower_case=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = BasicTokenizer(do_lower_case=__a, strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = BasicTokenizer(do_lower_case=__a, strip_accents=__a)
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = BasicTokenizer(do_lower_case=__a, never_split=["[UNK]"])
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = BasicTokenizer()
_lowerCAmelCase : Optional[int] = "a\n'll !!to?'d of, can't."
_lowerCAmelCase : int = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
self.assertListEqual(tokenizer.tokenize(__a), __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
_lowerCAmelCase : int = {}
for i, token in enumerate(__a):
_lowerCAmelCase : Dict = i
_lowerCAmelCase : Tuple = WordpieceTokenizer(vocab=__a, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize(""), [])
self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
def snake_case__ ( self):
'''simple docstring'''
self.assertTrue(_is_whitespace(" "))
self.assertTrue(_is_whitespace("\t"))
self.assertTrue(_is_whitespace("\r"))
self.assertTrue(_is_whitespace("\n"))
self.assertTrue(_is_whitespace("\u00A0"))
self.assertFalse(_is_whitespace("A"))
self.assertFalse(_is_whitespace("-"))
def snake_case__ ( self):
'''simple docstring'''
self.assertTrue(_is_control("\u0005"))
self.assertFalse(_is_control("A"))
self.assertFalse(_is_control(" "))
self.assertFalse(_is_control("\t"))
self.assertFalse(_is_control("\r"))
def snake_case__ ( self):
'''simple docstring'''
self.assertTrue(_is_punctuation("-"))
self.assertTrue(_is_punctuation("$"))
self.assertTrue(_is_punctuation("`"))
self.assertTrue(_is_punctuation("."))
self.assertFalse(_is_punctuation("A"))
self.assertFalse(_is_punctuation(" "))
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : str = self.get_tokenizer()
_lowerCAmelCase : List[str] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__a) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
self.assertListEqual(
[rust_tokenizer.tokenize(__a) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained("bert-base-uncased")
_lowerCAmelCase : str = tokenizer.encode("sequence builders", add_special_tokens=__a)
_lowerCAmelCase : List[str] = tokenizer.encode("multi-sequence build", add_special_tokens=__a)
_lowerCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__a)
_lowerCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(__a, __a)
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def snake_case__ ( self):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
_lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(__a, **__a)
_lowerCAmelCase : str = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
_lowerCAmelCase : Any = tokenizer_r.encode_plus(
__a, return_attention_mask=__a, return_token_type_ids=__a, return_offsets_mapping=__a, add_special_tokens=__a, )
_lowerCAmelCase : Optional[int] = tokenizer_r.do_lower_case if hasattr(__a, "do_lower_case") else False
_lowerCAmelCase : str = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]))
self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ["的", "人", "有"]
_lowerCAmelCase : Optional[int] = "".join(__a)
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained(__a, **__a)
_lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(__a, **__a)
_lowerCAmelCase : List[Any] = tokenizer_p.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : List[Any] = tokenizer_r.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(__a)
_lowerCAmelCase : int = tokenizer_p.convert_ids_to_tokens(__a)
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__a, __a)
self.assertListEqual(__a, __a)
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(__a, **__a)
_lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(__a, **__a)
_lowerCAmelCase : Optional[int] = tokenizer_r.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : Any = tokenizer_p.encode(__a, add_special_tokens=__a)
_lowerCAmelCase : str = tokenizer_r.convert_ids_to_tokens(__a)
_lowerCAmelCase : Dict = tokenizer_p.convert_ids_to_tokens(__a)
# it is expected that only the first Chinese character is not preceded by "##".
_lowerCAmelCase : Any = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(__a)
]
self.assertListEqual(__a, __a)
self.assertListEqual(__a, __a)
| 658 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel(ksize, sigma, theta, lambd, gamma, psi):
    """Return a (ksize x ksize) Gabor filter kernel as a float64 array."""
    # a Gabor kernel needs an odd size so it has a well-defined center pixel
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize), dtype=np.float64)
    # compute each value of the kernel
    for y in range(ksize):
        for x in range(ksize):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2
            # degrees to radians
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta)
            sin_theta = np.sin(_theta)
            # get kernel x
            _x = cos_theta * px + sin_theta * py
            # get kernel y
            _y = -sin_theta * px + cos_theta * py
            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2)) * np.cos(2 * np.pi * _x / lambd + psi)
    return gabor
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image into gray scale values
    gray = cvtColor(img, COLOR_BGR2GRAY)
    # apply multiple Gabor kernels (6 orientations) to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)
    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
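# Quick property check (illustrative parameters): with theta = 0 and psi = 0 the
# center column has _x == 0, so those entries equal exp(0) * cos(0) == 1.0, the
# kernel's maximum.
#   k = gabor_filter_kernel(10, 8, 0, 10, 0, 0)  # even ksize is bumped to 11
#   assert k.shape == (11, 11) and k[5, 5] == k.max() == 1.0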
| 658 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 42
lowerCamelCase__ = 42
class UpperCAmelCase_ ( nn.Module):
lowerCamelCase__ = 42
lowerCamelCase__ = (16, 32, 96, 256)
lowerCamelCase__ = jnp.floataa
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = nn.Conv(
self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
_lowerCAmelCase : List[Any] = []
for i in range(len(self.block_out_channels) - 1):
_lowerCAmelCase : List[Any] = self.block_out_channels[i]
_lowerCAmelCase : Dict = self.block_out_channels[i + 1]
_lowerCAmelCase : Any = nn.Conv(
__a, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(__a)
_lowerCAmelCase : Any = nn.Conv(
__a, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype, )
blocks.append(__a)
_lowerCAmelCase : Tuple = blocks
_lowerCAmelCase : Union[str, Any] = nn.Conv(
self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.conv_in(__a)
_lowerCAmelCase : str = nn.silu(__a)
for block in self.blocks:
_lowerCAmelCase : str = block(__a)
_lowerCAmelCase : Dict = nn.silu(__a)
_lowerCAmelCase : str = self.conv_out(__a)
return embedding
@flax_register_to_config
class UpperCAmelCase_ ( nn.Module , a , a):
lowerCamelCase__ = 32
lowerCamelCase__ = 4
lowerCamelCase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCamelCase__ = False
lowerCamelCase__ = (320, 640, 1280, 1280)
lowerCamelCase__ = 2
lowerCamelCase__ = 8
lowerCamelCase__ = None
lowerCamelCase__ = 1280
lowerCamelCase__ = 0.0
lowerCamelCase__ = False
lowerCamelCase__ = jnp.floataa
lowerCamelCase__ = True
lowerCamelCase__ = 0
lowerCamelCase__ = "rgb"
lowerCamelCase__ = (16, 32, 96, 256)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = (1, self.in_channels, self.sample_size, self.sample_size)
_lowerCAmelCase : Dict = jnp.zeros(__a, dtype=jnp.floataa)
_lowerCAmelCase : Any = jnp.ones((1,), dtype=jnp.intaa)
_lowerCAmelCase : int = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.floataa)
_lowerCAmelCase : List[Any] = (1, 3, self.sample_size * 8, self.sample_size * 8)
_lowerCAmelCase : List[str] = jnp.zeros(__a, dtype=jnp.floataa)
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = jax.random.split(__a)
_lowerCAmelCase : List[str] = {"params": params_rng, "dropout": dropout_rng}
return self.init(__a, __a, __a, __a, __a)["params"]
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.block_out_channels
_lowerCAmelCase : Any = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_lowerCAmelCase : Union[str, Any] = self.num_attention_heads or self.attention_head_dim
# input
_lowerCAmelCase : Dict = nn.Conv(
block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, )
# time
_lowerCAmelCase : List[str] = FlaxTimesteps(
block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
_lowerCAmelCase : Tuple = FlaxTimestepEmbedding(__a, dtype=self.dtype)
_lowerCAmelCase : List[Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels, )
_lowerCAmelCase : List[str] = self.only_cross_attention
if isinstance(__a, __a):
_lowerCAmelCase : List[str] = (only_cross_attention,) * len(self.down_block_types)
if isinstance(__a, __a):
_lowerCAmelCase : Any = (num_attention_heads,) * len(self.down_block_types)
# down
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Any = block_out_channels[0]
_lowerCAmelCase : Any = nn.Conv(
__a, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(__a)
for i, down_block_type in enumerate(self.down_block_types):
_lowerCAmelCase : Optional[Any] = output_channel
_lowerCAmelCase : Optional[int] = block_out_channels[i]
_lowerCAmelCase : List[Any] = i == len(__a) - 1
if down_block_type == "CrossAttnDownBlock2D":
_lowerCAmelCase : Dict = FlaxCrossAttnDownBlockaD(
in_channels=__a, out_channels=__a, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype, )
else:
_lowerCAmelCase : Optional[Any] = FlaxDownBlockaD(
in_channels=__a, out_channels=__a, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype, )
down_blocks.append(__a)
for _ in range(self.layers_per_block):
_lowerCAmelCase : str = nn.Conv(
__a, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(__a)
if not is_final_block:
_lowerCAmelCase : int = nn.Conv(
__a, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
controlnet_down_blocks.append(__a)
_lowerCAmelCase : Optional[int] = down_blocks
_lowerCAmelCase : int = controlnet_down_blocks
# mid
_lowerCAmelCase : Dict = block_out_channels[-1]
_lowerCAmelCase : Dict = FlaxUNetMidBlockaDCrossAttn(
in_channels=__a, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype, )
_lowerCAmelCase : str = nn.Conv(
__a, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype, )
def __call__( self, __a, __a, __a, __a, __a = 1.0, __a = True, __a = False, ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
_lowerCAmelCase : List[str] = jnp.flip(__a, axis=1)
# 1. time
if not isinstance(__a, jnp.ndarray):
_lowerCAmelCase : List[str] = jnp.array([timesteps], dtype=jnp.intaa)
elif isinstance(__a, jnp.ndarray) and len(timesteps.shape) == 0:
_lowerCAmelCase : Optional[int] = timesteps.astype(dtype=jnp.floataa)
_lowerCAmelCase : int = jnp.expand_dims(__a, 0)
_lowerCAmelCase : Union[str, Any] = self.time_proj(__a)
_lowerCAmelCase : Optional[int] = self.time_embedding(__a)
# 2. pre-process
_lowerCAmelCase : List[str] = jnp.transpose(__a, (0, 2, 3, 1))
_lowerCAmelCase : int = self.conv_in(__a)
_lowerCAmelCase : Optional[Any] = jnp.transpose(__a, (0, 2, 3, 1))
_lowerCAmelCase : Tuple = self.controlnet_cond_embedding(__a)
sample += controlnet_cond
# 3. down
_lowerCAmelCase : Any = (sample,)
for down_block in self.down_blocks:
if isinstance(__a, __a):
_lowerCAmelCase , _lowerCAmelCase : Tuple = down_block(__a, __a, __a, deterministic=not train)
else:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = down_block(__a, __a, deterministic=not train)
down_block_res_samples += res_samples
# 4. mid
_lowerCAmelCase : Any = self.mid_block(__a, __a, __a, deterministic=not train)
        # 5. controlnet blocks
_lowerCAmelCase : str = ()
for down_block_res_sample, controlnet_block in zip(__a, self.controlnet_down_blocks):
_lowerCAmelCase : str = controlnet_block(__a)
controlnet_down_block_res_samples += (down_block_res_sample,)
_lowerCAmelCase : int = controlnet_down_block_res_samples
_lowerCAmelCase : List[Any] = self.controlnet_mid_block(__a)
# 6. scaling
_lowerCAmelCase : List[str] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=__a, mid_block_res_sample=__a)
| 658 |
def binary_insertion_sort(collection):
    '''Sort a mutable sequence in place, using binary search to locate each insertion point.'''
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # binary search for where val belongs within the already-sorted prefix
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # shift larger elements one slot to the right, then insert val
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
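# Editorial sanity check for the implementation above (an addition to the original
# snippet; cheap enough to run at import time):
assert binary_insertion_sort([5, 2, 4, 1, 3]) == [1, 2, 3, 4, 5]
assert binary_insertion_sort([]) == []
assert binary_insertion_sort([7]) == [7]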
| 658 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'vit_msn'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-06, __a=224, __a=16, __a=3, __a=True, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Optional[int] = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : Union[str, Any] = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : List[str] = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : str = layer_norm_eps
_lowerCAmelCase : Dict = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : int = num_channels
_lowerCAmelCase : Optional[int] = qkv_bias
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
| 658 | 1 |
from typing import List, Optional, Union
import numpy as np
from ....audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function
from ....feature_extraction_sequence_utils import SequenceFeatureExtractor
from ....feature_extraction_utils import BatchFeature
from ....file_utils import PaddingStrategy, TensorType
from ....utils import logging
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = ['input_features', 'attention_mask']
def __init__( self, __a=80, __a=1_6000, __a=0.0, __a=10, __a=25, __a="hamming_window", __a=32_768.0, __a=0.97, __a=1.0, __a=True, __a=True, __a=False, **__a, ):
'''simple docstring'''
super().__init__(feature_size=__a, sampling_rate=__a, padding_value=__a, **__a)
_lowerCAmelCase : List[Any] = feature_size
_lowerCAmelCase : Tuple = sampling_rate
_lowerCAmelCase : List[Any] = padding_value
_lowerCAmelCase : int = hop_length
_lowerCAmelCase : str = win_length
_lowerCAmelCase : Any = frame_signal_scale
_lowerCAmelCase : str = preemphasis_coeff
_lowerCAmelCase : Optional[int] = mel_floor
_lowerCAmelCase : Optional[Any] = normalize_means
_lowerCAmelCase : List[Any] = normalize_vars
_lowerCAmelCase : Union[str, Any] = win_function
_lowerCAmelCase : Dict = return_attention_mask
_lowerCAmelCase : Union[str, Any] = win_length * sampling_rate // 1000
_lowerCAmelCase : int = hop_length * sampling_rate // 1000
_lowerCAmelCase : Optional[int] = optimal_fft_length(self.sample_size)
_lowerCAmelCase : List[str] = (self.n_fft // 2) + 1
def snake_case__ ( self, __a):
'''simple docstring'''
if self.win_function == "hamming_window":
_lowerCAmelCase : Optional[int] = window_function(window_length=self.sample_size, name=self.win_function, periodic=__a)
else:
_lowerCAmelCase : List[Any] = window_function(window_length=self.sample_size, name=self.win_function)
_lowerCAmelCase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=self.n_freqs, num_mel_filters=self.feature_size, min_frequency=0.0, max_frequency=self.sampling_rate / 2.0, sampling_rate=self.sampling_rate, )
_lowerCAmelCase : Union[str, Any] = spectrogram(
one_waveform * self.frame_signal_scale, window=__a, frame_length=self.sample_size, hop_length=self.sample_stride, fft_length=self.n_fft, center=__a, preemphasis=self.preemphasis_coeff, mel_filters=__a, mel_floor=self.mel_floor, log_mel="log", )
return msfc_features.T
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
if self.normalize_means:
_lowerCAmelCase : Optional[Any] = x[:input_length].mean(axis=0)
_lowerCAmelCase : List[Any] = np.subtract(__a, __a)
if self.normalize_vars:
_lowerCAmelCase : Optional[int] = x[:input_length].std(axis=0)
_lowerCAmelCase : int = np.divide(__a, __a)
if input_length < x.shape[0]:
_lowerCAmelCase : List[Any] = padding_value
# make sure array is in float32
_lowerCAmelCase : Dict = x.astype(np.floataa)
return x
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Any = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
return [self._normalize_one(__a, __a, self.padding_value) for x, n in zip(__a, __a)]
def __call__( self, __a, __a = False, __a = None, __a = False, __a = None, __a = None, __a = None, __a = None, **__a, ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
f" {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"It is strongly recommended to pass the ``sampling_rate`` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug.")
_lowerCAmelCase : str = isinstance(__a, np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
_lowerCAmelCase : int = is_batched_numpy or (
isinstance(__a, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
)
if is_batched:
_lowerCAmelCase : List[Any] = [np.asarray(__a, dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(__a, np.ndarray):
_lowerCAmelCase : Dict = np.asarray(__a, dtype=np.floataa)
elif isinstance(__a, np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
_lowerCAmelCase : int = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
_lowerCAmelCase : Union[str, Any] = [raw_speech]
# extract fbank features
_lowerCAmelCase : Optional[int] = [self._extract_mfsc_features(__a) for one_waveform in raw_speech]
# convert into correct format for padding
_lowerCAmelCase : Any = BatchFeature({"input_features": features})
_lowerCAmelCase : Dict = self.pad(
__a, padding=__a, max_length=__a, truncation=__a, pad_to_multiple_of=__a, return_attention_mask=__a, **__a, )
# make sure list is in array format
_lowerCAmelCase : Any = padded_inputs.get("input_features")
if isinstance(input_features[0], __a):
_lowerCAmelCase : List[Any] = [np.asarray(__a, dtype=np.floataa) for feature in input_features]
_lowerCAmelCase : str = padded_inputs.get("attention_mask")
if attention_mask is not None:
_lowerCAmelCase : Dict = [np.asarray(__a, dtype=np.intaa) for array in attention_mask]
if self.normalize_means or self.normalize_vars:
_lowerCAmelCase : List[Any] = (
np.array(__a, dtype=np.intaa)
if self._get_padding_strategies(__a, max_length=__a) is not PaddingStrategy.DO_NOT_PAD
and padding
else None
)
_lowerCAmelCase : str = self.normalize(
padded_inputs["input_features"], attention_mask=__a)
if return_tensors is not None:
_lowerCAmelCase : Tuple = padded_inputs.convert_to_tensors(__a)
return padded_inputs
| 658 |
def combination_sum_iv(n, array, target):
    '''Count ordered sequences of elements from array (with repetition) that sum to target, via naive recursion.'''
    def count_of_possible_combinations(target) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n, array, target):
    '''Same count, with the recursion memoized in dp_array.'''
    def count_of_possible_combinations_with_dp_array(
        target, dp_array
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n, array, target):
    '''Same count, built bottom-up: dp_array[i] holds the number of sequences summing to i.'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    array = [1, 2, 5]
    target = 5
print(combination_sum_iv(n, array, target))
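# Editorial cross-check: all three strategies count the ordered sequences drawn
# from [1, 2, 5] (with repetition) that sum to 5; there are 9 of them.
assert (
    combination_sum_iv(3, [1, 2, 5], 5)
    == combination_sum_iv_dp_array(3, [1, 2, 5], 5)
    == combination_sum_iv_bottom_up(3, [1, 2, 5], 5)
    == 9
)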
| 658 | 1 |
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
_snake_case = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n"
_snake_case = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n"
_snake_case = R"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n"
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("string"),
"references": datasets.Value("string"),
}), homepage="https://github.com/hendrycks/math", codebase_urls=["https://github.com/hendrycks/math"], )
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Any = 0.0
for i, j in zip(__a, __a):
n_correct += 1.0 if math_equivalence.is_equiv(__a, __a) else 0.0
_lowerCAmelCase : Dict = n_correct / len(__a)
return {
"accuracy": accuracy,
}
| 658 |
import string
def decrypt(message):
    '''Brute-force a Caesar cipher: print the decryption of message under every possible key.'''
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"Decryption using Key #{key}: {translated}")


def main():
    '''Read a ciphertext from stdin and brute-force it.'''
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
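    # Editorial usage sketch: "KHOOR ZRUOG" is "HELLO WORLD" Caesar-shifted by 3,
    # so the brute-force pass prints the plaintext on the key #3 line.
    decrypt("KHOOR ZRUOG")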
| 658 | 1 |
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
'''simple docstring'''
_lowerCAmelCase : Any = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowerCAmelCase : Optional[int] = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
_lowerCAmelCase : Optional[int] = 4
_lowerCAmelCase : List[Any] = 48
_lowerCAmelCase : Optional[int] = "pixelshuffle_aux"
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowerCAmelCase : Optional[int] = [6, 6, 6, 6]
_lowerCAmelCase : Union[str, Any] = 60
_lowerCAmelCase : str = [6, 6, 6, 6]
_lowerCAmelCase : str = "pixelshuffledirect"
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowerCAmelCase : str = 4
_lowerCAmelCase : Optional[Any] = "nearest+conv"
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Union[str, Any] = 126
_lowerCAmelCase : Optional[Any] = 7
_lowerCAmelCase : List[str] = 2_55.0
_lowerCAmelCase : Optional[int] = ""
return config
def rename_key(name, config):
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
_lowerCAmelCase : Optional[int] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
_lowerCAmelCase : Optional[Any] = name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" )
if "layers" in name:
_lowerCAmelCase : Optional[Any] = name.replace("layers" , "encoder.stages" )
if "residual_group.blocks" in name:
_lowerCAmelCase : int = name.replace("residual_group.blocks" , "layers" )
if "attn.proj" in name:
_lowerCAmelCase : Any = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
_lowerCAmelCase : str = name.replace("attn" , "attention.self" )
if "norm1" in name:
_lowerCAmelCase : int = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_lowerCAmelCase : Union[str, Any] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
_lowerCAmelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_lowerCAmelCase : Optional[Any] = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
_lowerCAmelCase : Optional[Any] = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
_lowerCAmelCase : int = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
_lowerCAmelCase : Union[str, Any] = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
_lowerCAmelCase : Tuple = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if "patch_embed.proj" in name:
_lowerCAmelCase : Optional[int] = name.replace("patch_embed.proj" , "patch_embed.projection" )
if name == "norm.weight":
_lowerCAmelCase : List[str] = "layernorm.weight"
if name == "norm.bias":
_lowerCAmelCase : Optional[int] = "layernorm.bias"
if "conv_first" in name:
_lowerCAmelCase : Tuple = name.replace("conv_first" , "first_convolution" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
_lowerCAmelCase : Optional[int] = name.replace("conv_last" , "final_convolution" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
_lowerCAmelCase : Any = name.replace("conv_before_upsample.0" , "conv_before_upsample" )
if "upsample.0" in name:
_lowerCAmelCase : Optional[int] = name.replace("upsample.0" , "upsample.convolution_0" )
if "upsample.2" in name:
_lowerCAmelCase : int = name.replace("upsample.2" , "upsample.convolution_1" )
_lowerCAmelCase : Tuple = "upsample." + name
elif config.upsampler == "pixelshuffledirect":
_lowerCAmelCase : int = name.replace("upsample.0.weight" , "upsample.conv.weight" )
_lowerCAmelCase : Optional[Any] = name.replace("upsample.0.bias" , "upsample.conv.bias" )
else:
pass
else:
_lowerCAmelCase : int = "swin2sr." + name
return name
def convert_state_dict(orig_state_dict, config):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
if "qkv" in key:
_lowerCAmelCase : Union[str, Any] = key.split("." )
_lowerCAmelCase : Union[str, Any] = int(key_split[1] )
_lowerCAmelCase : Tuple = int(key_split[4] )
_lowerCAmelCase : Union[str, Any] = config.embed_dim
if "weight" in key:
_lowerCAmelCase : Union[str, Any] = val[:dim, :]
_lowerCAmelCase : int = val[dim : dim * 2, :]
_lowerCAmelCase : Tuple = val[-dim:, :]
else:
_lowerCAmelCase : str = val[:dim]
_lowerCAmelCase : Tuple = val[dim : dim * 2]
_lowerCAmelCase : Dict = val[-dim:]
pass
else:
            orig_state_dict[rename_key(key, config)] = val
return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
'''simple docstring'''
_lowerCAmelCase : Dict = get_config(_lowerCamelCase )
_lowerCAmelCase : Any = SwinaSRForImageSuperResolution(_lowerCamelCase )
model.eval()
_lowerCAmelCase : Tuple = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="cpu" )
_lowerCAmelCase : List[Any] = convert_state_dict(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
raise ValueError("Missing keys when converting: {}".format(_lowerCamelCase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F"Unexpected key {key} in state_dict" )
# verify values
_lowerCAmelCase : str = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
_lowerCAmelCase : Dict = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("RGB" )
_lowerCAmelCase : Dict = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
_lowerCAmelCase : List[Any] = 126 if "Jpeg" in checkpoint_url else 256
_lowerCAmelCase : Optional[Any] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ),
] )
_lowerCAmelCase : List[str] = transforms(_lowerCamelCase ).unsqueeze(0 )
if config.num_channels == 1:
_lowerCAmelCase : str = pixel_values[:, 0, :, :].unsqueeze(1 )
_lowerCAmelCase : int = model(_lowerCamelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
_lowerCAmelCase : Optional[Any] = torch.Size([1, 3, 512, 512] )
_lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
_lowerCAmelCase : List[str] = torch.Size([1, 3, 1_024, 1_024] )
_lowerCAmelCase : Dict = torch.tensor(
[[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
_lowerCAmelCase : List[Any] = torch.Size([1, 3, 1_024, 1_024] )
_lowerCAmelCase : List[Any] = torch.tensor(
[[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
_lowerCAmelCase : str = torch.Size([1, 3, 512, 512] )
_lowerCAmelCase : Tuple = torch.tensor(
[[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
_lowerCAmelCase : Tuple = torch.Size([1, 3, 1_024, 1_024] )
_lowerCAmelCase : Optional[Any] = torch.tensor(
[[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
assert (
outputs.reconstruction.shape == expected_shape
), F"Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , _lowerCamelCase , atol=1e-3 )
print("Looks ok!" )
_lowerCAmelCase : str = {
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
_lowerCAmelCase : Tuple = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
print(F"Saving image processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
model.push_to_hub(F"caidas/{model_name}" )
processor.push_to_hub(F"caidas/{model_name}" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
type=str,
help="URL of the original Swin2SR checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the converted model to the hub.")
_snake_case = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
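    # Editorial sketch of an equivalent programmatic invocation (the output
    # directory name is hypothetical; running it downloads the checkpoint):
    #
    #   convert_swinasr_checkpoint(
    #       checkpoint_url="https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth",
    #       pytorch_dump_folder_path="./swin2sr-classical-sr-x2-64",
    #       push_to_hub=False,
    #   )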
| 658 |
import requests
from bs4 import BeautifulSoup


def world_covidaa_stats(url="https://www.worldometers.info/coronavirus"):
    '''Scrape worldometers.info and return a mapping of COVID-19 headline statistics.'''
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 1 |
def greatest_common_divisor(x, y):
    '''Euclidean algorithm for the greatest common divisor.'''
    return x if y == 0 else greatest_common_divisor(y, x % y)


def lcm(x, y):
    '''Least common multiple via lcm(x, y) = x * y / gcd(x, y).'''
    return (x * y) // greatest_common_divisor(x, y)


def solution(n=20):
    '''Smallest positive number evenly divisible by every integer from 1 to n.'''
    g = 1
    for i in range(1, n + 1):
        g = lcm(g, i)
    return g
if __name__ == "__main__":
print(f'''{solution() = }''')
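# Editorial cross-check: solution(n) is lcm(1, ..., n); the default n = 20 gives
# the well-known Project Euler #5 answer.
assert solution(10) == 2520
assert solution() == 232792560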
| 658 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial:
    def __init__(self, degree, coefficients):
        '''Polynomial in one indeterminate; coefficients are ordered from x^0 upwards.'''
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1.")
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_a):
        '''Add two polynomials, padding the shorter coefficient list.'''
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree, coefficients)

    def __sub__(self, polynomial_a):
        '''Subtract by adding the additive inverse.'''
        return self + polynomial_a * Polynomial(0, [-1])

    def __neg__(self):
        '''Negate every coefficient.'''
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_a):
        '''Multiply two polynomials by convolving their coefficient lists.'''
        coefficients: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_a.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree, coefficients)

    def evaluate(self, substitution):
        '''Evaluate the polynomial at the given value.'''
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self):
        '''Human-readable representation such as "3x^2 + 2x + 1".'''
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self):
        return self.__str__()

    def derivative(self):
        '''Return the first derivative as a new Polynomial.'''
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant=0):
        '''Return the antiderivative with the given integration constant.'''
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_a):
        '''Two polynomials are equal when their degree and all coefficients match.'''
        if not isinstance(polynomial_a, Polynomial):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_a):
        return not self.__eq__(polynomial_a)
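# Editorial usage sketch for the class above.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # coefficients from x^0 upwards: 1 + 2x + 3x^2
    print(p)               # 3x^2 + 2x + 1
    print(p.evaluate(2))   # 3*4 + 2*2 + 1 = 17
    print(p.derivative())  # 6x + 2
    print(p.integral())    # 1.0x^3 + 1.0x^2 + 1.0x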
| 658 | 1 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a = None, __a = None, __a = None, __a = False, __a = False, __a = None, **__a, ):
'''simple docstring'''
super().__init__(
__a, split=__a, features=__a, cache_dir=__a, keep_in_memory=__a, streaming=__a, num_proc=__a, **__a, )
_lowerCAmelCase : Tuple = path_or_paths if isinstance(__a, __a) else {self.split: path_or_paths}
_lowerCAmelCase : Tuple = Text(
cache_dir=__a, data_files=__a, features=__a, **__a, )
def snake_case__ ( self):
'''simple docstring'''
if self.streaming:
_lowerCAmelCase : Optional[int] = self.builder.as_streaming_dataset(split=self.split)
# Build regular (map-style) dataset
else:
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Optional[Any] = None
self.builder.download_and_prepare(
download_config=__a, download_mode=__a, verification_mode=__a, base_path=__a, num_proc=self.num_proc, )
_lowerCAmelCase : Optional[Any] = self.builder.as_dataset(
split=self.split, verification_mode=__a, in_memory=self.keep_in_memory)
return dataset
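# Editorial note: this reader backs the public `load_dataset("text", ...)` entry
# point; a minimal usage sketch (the file name is hypothetical):
#
#   from datasets import load_dataset
#   ds = load_dataset("text", data_files={"train": "corpus.txt"}, split="train")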
| 658 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'xlnet'
lowerCamelCase__ = ['mems']
lowerCamelCase__ = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=3_2000, __a=1024, __a=24, __a=16, __a=4096, __a="gelu", __a=True, __a="bi", __a=0.02, __a=1E-12, __a=0.1, __a=512, __a=None, __a=True, __a=False, __a=False, __a=-1, __a=False, __a="last", __a=True, __a="tanh", __a=0.1, __a=5, __a=5, __a=5, __a=1, __a=2, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Tuple = n_layer
_lowerCAmelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
_lowerCAmelCase : Optional[int] = d_model // n_head
_lowerCAmelCase : List[str] = ff_activation
_lowerCAmelCase : Tuple = d_inner
_lowerCAmelCase : List[Any] = untie_r
_lowerCAmelCase : List[str] = attn_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = dropout
_lowerCAmelCase : Optional[int] = mem_len
_lowerCAmelCase : Union[str, Any] = reuse_len
_lowerCAmelCase : List[str] = bi_data
_lowerCAmelCase : List[str] = clamp_len
_lowerCAmelCase : Any = same_length
_lowerCAmelCase : List[str] = summary_type
_lowerCAmelCase : int = summary_use_proj
_lowerCAmelCase : Optional[Any] = summary_activation
_lowerCAmelCase : Tuple = summary_last_dropout
_lowerCAmelCase : Union[str, Any] = start_n_top
_lowerCAmelCase : Optional[int] = end_n_top
_lowerCAmelCase : Tuple = bos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", __a, )
_lowerCAmelCase : Union[str, Any] = kwargs["use_cache"]
_lowerCAmelCase : Union[str, Any] = use_mems_eval
_lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
@property
def snake_case__ ( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
def snake_case__ ( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 1 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
_snake_case = ""
if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):
class UpperCAmelCase_ ( tr.AbstractTransform):
def __init__( self, __a = " "):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = sentence_delimiter
def snake_case__ ( self, __a):
'''simple docstring'''
return list(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int = []
for sent_idx, sentence in enumerate(__a):
chars.extend(self.process_string(__a))
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(__a) - 1:
chars.append(self.sentence_delimiter)
return chars
_snake_case = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
_snake_case = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_snake_case = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_snake_case = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_snake_case = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Value("string", id="sequence"),
}), codebase_urls=["https://github.com/jitsi/jiwer/"], reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
], )
def snake_case__ ( self, __a, __a, __a=False):
'''simple docstring'''
if concatenate_texts:
return jiwer.compute_measures(
__a, __a, truth_transform=__a, hypothesis_transform=__a, )["wer"]
_lowerCAmelCase : Optional[int] = 0
_lowerCAmelCase : Tuple = 0
for prediction, reference in zip(__a, __a):
_lowerCAmelCase : Any = jiwer.compute_measures(
__a, __a, truth_transform=__a, hypothesis_transform=__a, )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 658 |
def price_plus_tax(price, tax_rate):
    '''Return the price after applying the given tax rate.'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 1 |
_snake_case = "\n# Installazione di Transformers\n! pip install transformers datasets\n# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e\n# rimuovi la modalità commento al comando seguente.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_snake_case = [{"type": "code", "content": INSTALL_CONTENT}]
_snake_case = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 658 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 658 | 1 |
import numpy as np
def sigmoid(vector):
    '''Elementwise logistic sigmoid: 1 / (1 + exp(-x)).'''
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector):
    '''SiLU (swish) activation: x * sigmoid(x), applied elementwise.'''
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod()
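# Editorial numeric check: sigmoid(0) = 0.5, so the SiLU of 0 is 0; both helpers
# broadcast elementwise over numpy arrays.
assert sigmoid(np.array([0.0]))[0] == 0.5
assert sigmoid_linear_unit(np.array([0.0]))[0] == 0.0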
| 658 |
import base64


def base85_encode(string):
    '''Encode a UTF-8 string as Ascii85 bytes.'''
    return base64.a85encode(string.encode("utf-8"))


def base85_decode(a85encoded):
    '''Decode Ascii85 bytes back into a UTF-8 string.'''
    return base64.a85decode(a85encoded).decode("utf-8")
if __name__ == "__main__":
import doctest
doctest.testmod()
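# Editorial round-trip check for the two helpers above:
assert base85_decode(base85_encode("Hello, world!")) == "Hello, world!"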
| 658 | 1 |
def euclidean_gcd(a, b):
    '''Iterative Euclidean algorithm for the greatest common divisor.'''
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a, b):
    '''Recursive Euclidean algorithm for the greatest common divisor.'''
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main():
    '''Print a few worked examples of both implementations.'''
print(F"euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}" )
print(F"euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}" )
print(F"euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}" )
print(F"euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}" )
print(F"euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}" )
print(F"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}" )
print(F"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}" )
print(F"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}" )
print(F"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}" )
if __name__ == "__main__":
main()
| 658 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : str = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : List[str] = use_shared_relative_position_bias
_lowerCAmelCase : List[str] = layer_scale_init_value
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[int] = use_auxiliary_head
_lowerCAmelCase : Optional[Any] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : int = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
| 658 | 1 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess(image, w, h):
    '''Convert a PIL image (or list of images) into a [-1, 1] torch tensor batch.'''
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
    '''Spherical linear interpolation between two (numpy or torch) vectors.'''
    inputs_are_torch = False
    if not isinstance(v0, np.ndarray):
        inputs_are_torch = True
        input_device = v0.device
        v0 = v0.cpu().numpy()
        v1 = v1.cpu().numpy()
    dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
    if np.abs(dot) > DOT_THRESHOLD:
        # nearly colinear vectors: fall back to plain linear interpolation
        v2 = (1 - t) * v0 + t * v1
    else:
        theta_0 = np.arccos(dot)
        sin_theta_0 = np.sin(theta_0)
        theta_t = theta_0 * t
        sin_theta_t = np.sin(theta_t)
        s0 = np.sin(theta_0 - theta_t) / sin_theta_0
        s1 = sin_theta_t / sin_theta_0
        v2 = s0 * v0 + s1 * v1
    if inputs_are_torch:
        v2 = torch.from_numpy(v2).to(input_device)
    return v2
def spherical_dist_loss(x, y):
    '''Squared spherical distance between L2-normalized embeddings.'''
    x = F.normalize(x, dim=-1)
    y = F.normalize(y, dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
def set_requires_grad(model, value):
    '''Enable or disable gradient tracking for every parameter of a module.'''
    for param in model.parameters():
        param.requires_grad = value
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a, __a, __a, __a, __a, __a, __a=None, __a=None, __a=None, ):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=__a, text_encoder=__a, clip_model=__a, tokenizer=__a, unet=__a, scheduler=__a, feature_extractor=__a, coca_model=__a, coca_tokenizer=__a, coca_transform=__a, )
_lowerCAmelCase : Optional[Any] = (
feature_extractor.size
if isinstance(feature_extractor.size, __a)
else feature_extractor.size["shortest_edge"]
)
_lowerCAmelCase : List[Any] = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
set_requires_grad(self.text_encoder, __a)
set_requires_grad(self.clip_model, __a)
def snake_case__ ( self, __a = "auto"):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_lowerCAmelCase : str = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__a)
def snake_case__ ( self):
'''simple docstring'''
self.enable_attention_slicing(__a)
def snake_case__ ( self):
'''simple docstring'''
set_requires_grad(self.vae, __a)
def snake_case__ ( self):
'''simple docstring'''
set_requires_grad(self.vae, __a)
def snake_case__ ( self):
'''simple docstring'''
set_requires_grad(self.unet, __a)
def snake_case__ ( self):
'''simple docstring'''
set_requires_grad(self.unet, __a)
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : List[str] = min(int(num_inference_steps * strength), __a)
_lowerCAmelCase : List[str] = max(num_inference_steps - init_timestep, 0)
_lowerCAmelCase : str = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def snake_case__ ( self, __a, __a, __a, __a, __a, __a=None):
'''simple docstring'''
if not isinstance(__a, torch.Tensor):
raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(__a)}")
_lowerCAmelCase : List[str] = image.to(device=__a, dtype=__a)
if isinstance(__a, __a):
_lowerCAmelCase : Any = [
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(__a)
]
_lowerCAmelCase : Optional[Any] = torch.cat(__a, dim=0)
else:
_lowerCAmelCase : Any = self.vae.encode(__a).latent_dist.sample(__a)
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase : List[str] = 0.18_215 * init_latents
_lowerCAmelCase : List[Any] = init_latents.repeat_interleave(__a, dim=0)
_lowerCAmelCase : Union[str, Any] = randn_tensor(init_latents.shape, generator=__a, device=__a, dtype=__a)
# get latents
_lowerCAmelCase : Union[str, Any] = self.scheduler.add_noise(__a, __a, __a)
_lowerCAmelCase : Optional[int] = init_latents
return latents
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : str = self.coca_transform(__a).unsqueeze(0)
with torch.no_grad(), torch.cuda.amp.autocast():
_lowerCAmelCase : Optional[int] = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
_lowerCAmelCase : Tuple = self.coca_tokenizer.decode(generated[0].cpu().numpy())
return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.feature_extractor.preprocess(__a)
_lowerCAmelCase : Tuple = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
_lowerCAmelCase : int = self.clip_model.get_image_features(__a)
_lowerCAmelCase : str = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=__a)
_lowerCAmelCase : Union[str, Any] = image_embeddings_clip.repeat_interleave(__a, dim=0)
return image_embeddings_clip
@torch.enable_grad()
def snake_case__ ( self, __a, __a, __a, __a, __a, __a, __a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = latents.detach().requires_grad_()
_lowerCAmelCase : Any = self.scheduler.scale_model_input(__a, __a)
# predict the noise residual
_lowerCAmelCase : Union[str, Any] = self.unet(__a, __a, encoder_hidden_states=__a).sample
if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
_lowerCAmelCase : int = self.scheduler.alphas_cumprod[timestep]
_lowerCAmelCase : str = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_lowerCAmelCase : str = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_lowerCAmelCase : Optional[int] = torch.sqrt(__a)
_lowerCAmelCase : Union[str, Any] = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler, __a):
_lowerCAmelCase : List[Any] = self.scheduler.sigmas[index]
_lowerCAmelCase : int = latents - sigma * noise_pred
else:
raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_lowerCAmelCase : Any = 1 / 0.18_215 * sample
_lowerCAmelCase : Optional[Any] = self.vae.decode(__a).sample
_lowerCAmelCase : Tuple = (image / 2 + 0.5).clamp(0, 1)
_lowerCAmelCase : str = transforms.Resize(self.feature_extractor_size)(__a)
_lowerCAmelCase : Any = self.normalize(__a).to(latents.dtype)
_lowerCAmelCase : Tuple = self.clip_model.get_image_features(__a)
_lowerCAmelCase : Dict = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=__a)
_lowerCAmelCase : Any = spherical_dist_loss(__a, __a).mean() * clip_guidance_scale
_lowerCAmelCase : str = -torch.autograd.grad(__a, __a)[0]
if isinstance(self.scheduler, __a):
_lowerCAmelCase : Tuple = latents.detach() + grads * (sigma**2)
_lowerCAmelCase : List[Any] = noise_pred_original
else:
_lowerCAmelCase : Optional[int] = noise_pred_original - torch.sqrt(__a) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self, __a, __a, __a = None, __a = None, __a = 512, __a = 512, __a = 0.6, __a = 50, __a = 7.5, __a = 1, __a = 0.0, __a = 100, __a = None, __a = "pil", __a = True, __a = 0.8, __a = 0.1, __a = 0.1, ):
'''simple docstring'''
if isinstance(__a, __a) and len(__a) != batch_size:
raise ValueError(f"You have passed {batch_size} batch_size, but only {len(__a)} generators.")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if isinstance(__a, torch.Generator) and batch_size > 1:
_lowerCAmelCase : Dict = [generator] + [None] * (batch_size - 1)
_lowerCAmelCase : Dict = [
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
_lowerCAmelCase : Optional[Any] = [x[0] for x in coca_is_none if x[1]]
_lowerCAmelCase : Optional[int] = ", ".join(__a)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(__a):
raise ValueError(
f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
f"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
_lowerCAmelCase : List[str] = self.get_image_description(__a)
if style_prompt is None:
if len(__a):
raise ValueError(
f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.")
_lowerCAmelCase : List[str] = self.get_image_description(__a)
# get prompt text embeddings for content and style
_lowerCAmelCase : Optional[Any] = self.tokenizer(
__a, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=__a, return_tensors="pt", )
_lowerCAmelCase : List[Any] = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
_lowerCAmelCase : int = self.tokenizer(
__a, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=__a, return_tensors="pt", )
_lowerCAmelCase : List[str] = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
_lowerCAmelCase : str = slerp(__a, __a, __a)
# duplicate text embeddings for each generation per prompt
_lowerCAmelCase : Union[str, Any] = text_embeddings.repeat_interleave(__a, dim=0)
# set timesteps
_lowerCAmelCase : Tuple = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
_lowerCAmelCase : List[str] = {}
if accepts_offset:
_lowerCAmelCase : Optional[int] = 1
self.scheduler.set_timesteps(__a, **__a)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device)
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = self.get_timesteps(__a, __a, self.device)
_lowerCAmelCase : Dict = timesteps[:1].repeat(__a)
# Preprocess image
_lowerCAmelCase : Optional[int] = preprocess(__a, __a, __a)
_lowerCAmelCase : Any = self.prepare_latents(
__a, __a, __a, text_embeddings.dtype, self.device, __a)
_lowerCAmelCase : List[Any] = preprocess(__a, __a, __a)
_lowerCAmelCase : str = self.prepare_latents(
__a, __a, __a, text_embeddings.dtype, self.device, __a)
_lowerCAmelCase : Tuple = slerp(__a, __a, __a)
if clip_guidance_scale > 0:
_lowerCAmelCase : List[str] = self.get_clip_image_embeddings(__a, __a)
_lowerCAmelCase : Optional[Any] = self.get_clip_image_embeddings(__a, __a)
_lowerCAmelCase : Optional[int] = slerp(
__a, __a, __a)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_lowerCAmelCase : Union[str, Any] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase : str = content_text_input.input_ids.shape[-1]
_lowerCAmelCase : Union[str, Any] = self.tokenizer([""], padding="max_length", max_length=__a, return_tensors="pt")
_lowerCAmelCase : str = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
_lowerCAmelCase : Optional[int] = uncond_embeddings.repeat_interleave(__a, dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_lowerCAmelCase : Optional[int] = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_lowerCAmelCase : str = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_lowerCAmelCase : Union[str, Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_lowerCAmelCase : List[str] = torch.randn(__a, generator=__a, device="cpu", dtype=__a).to(
self.device)
else:
_lowerCAmelCase : Optional[Any] = torch.randn(__a, generator=__a, device=self.device, dtype=__a)
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
_lowerCAmelCase : Dict = latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
_lowerCAmelCase : Tuple = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_lowerCAmelCase : List[Any] = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
_lowerCAmelCase : Optional[int] = {}
if accepts_eta:
_lowerCAmelCase : List[Any] = eta
# check if the scheduler accepts generator
_lowerCAmelCase : Optional[int] = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
_lowerCAmelCase : Optional[int] = generator
with self.progress_bar(total=__a):
for i, t in enumerate(__a):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_lowerCAmelCase : List[str] = self.scheduler.scale_model_input(__a, __a)
# predict the noise residual
_lowerCAmelCase : List[Any] = self.unet(__a, __a, encoder_hidden_states=__a).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : List[Any] = noise_pred.chunk(2)
_lowerCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_lowerCAmelCase : str = (
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
_lowerCAmelCase , _lowerCAmelCase : str = self.cond_fn(
__a, __a, __a, __a, __a, __a, __a, )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(__a, __a, __a, **__a).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
_lowerCAmelCase : Tuple = 1 / 0.18_215 * latents
_lowerCAmelCase : Optional[Any] = self.vae.decode(__a).sample
_lowerCAmelCase : List[Any] = (image / 2 + 0.5).clamp(0, 1)
_lowerCAmelCase : List[Any] = image.cpu().permute(0, 2, 3, 1).numpy()
if output_type == "pil":
_lowerCAmelCase : str = self.numpy_to_pil(__a)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=__a, nsfw_content_detected=__a)
| 658 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
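    # build the task-specific XLNet model, load the TF checkpoint weights, then save the PyTorch weights and config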
_lowerCAmelCase : Optional[Any] = XLNetConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : Any = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase : Any = finetuning_task
_lowerCAmelCase : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase : Union[str, Any] = XLNetForSequenceClassification(_lowerCamelCase )
elif "squad" in finetuning_task:
_lowerCAmelCase : Union[str, Any] = finetuning_task
_lowerCAmelCase : Any = XLNetForQuestionAnswering(_lowerCamelCase )
else:
_lowerCAmelCase : Union[str, Any] = XLNetLMHeadModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
    _lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , WEIGHTS_NAME )
    _lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , CONFIG_NAME )
print(F"Save PyTorch model to {os.path.abspath(_lowerCamelCase )}" )
torch.save(model.state_dict() , _lowerCamelCase )
print(F"Save configuration file to {os.path.abspath(_lowerCamelCase )}" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 658 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'convnextv2'
def __init__( self, __a=3, __a=4, __a=4, __a=None, __a=None, __a="gelu", __a=0.02, __a=1E-12, __a=0.0, __a=224, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = num_channels
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Dict = num_stages
_lowerCAmelCase : int = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
_lowerCAmelCase : List[Any] = [3, 3, 9, 3] if depths is None else depths
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Optional[Any] = layer_norm_eps
_lowerCAmelCase : int = drop_path_rate
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : str = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
| 658 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a=4, __a=False):
'''simple docstring'''
_lowerCAmelCase : List[str] = compute_bleu(
reference_corpus=__a, translation_corpus=__a, max_order=__a, smooth=__a)
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 658 | 1 |
def A ( _lowerCamelCase ):
'''simple docstring'''
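    # the n-th hexagonal number is n * (2n - 1)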
    if length <= 0 or not isinstance(_lowerCamelCase , int ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Dict = OmegaConf.load(_lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) )
return config
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if conf_path is None:
_lowerCAmelCase : Union[str, Any] = "./model_checkpoints/vqgan_only.yaml"
_lowerCAmelCase : Tuple = load_config(_lowerCamelCase , display=_lowerCamelCase )
_lowerCAmelCase : str = VQModel(**config.model.params )
if ckpt_path is None:
_lowerCAmelCase : Optional[int] = "./model_checkpoints/vqgan_only.pt"
_lowerCAmelCase : int = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
if ".ckpt" in ckpt_path:
_lowerCAmelCase : List[Any] = sd["state_dict"]
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
model.to(_lowerCamelCase )
del sd
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = model.encode(_lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
_lowerCAmelCase : int = model.decode(_lowerCamelCase )
return xrec
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = string.rsplit("." , 1 )
if reload:
_lowerCAmelCase : Dict = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def A ( _lowerCamelCase ):
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : str = instantiate_from_config(_lowerCamelCase )
if sd is not None:
model.load_state_dict(_lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if ckpt:
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" )
_lowerCAmelCase : int = pl_sd["global_step"]
print(F"loaded model from global step {global_step}." )
else:
_lowerCAmelCase : Optional[int] = {"state_dict": None}
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"]
return model, global_step
| 658 | 1 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def A ( _lowerCamelCase ):
'''simple docstring'''
if "model" in orig_key:
_lowerCAmelCase : Union[str, Any] = orig_key.replace("model." , "" )
if "norm1" in orig_key:
_lowerCAmelCase : Any = orig_key.replace("norm1" , "attention.output.LayerNorm" )
if "norm2" in orig_key:
_lowerCAmelCase : int = orig_key.replace("norm2" , "output.LayerNorm" )
if "norm" in orig_key:
_lowerCAmelCase : Optional[int] = orig_key.replace("norm" , "LayerNorm" )
if "transformer" in orig_key:
_lowerCAmelCase : Optional[int] = orig_key.split("." )[0].split("_" )[-1]
_lowerCAmelCase : Dict = orig_key.replace(F"transformer_{layer_num}" , F"encoder.layer.{layer_num}" )
if "mha.attn" in orig_key:
_lowerCAmelCase : Dict = orig_key.replace("mha.attn" , "attention.self" )
if "mha" in orig_key:
_lowerCAmelCase : str = orig_key.replace("mha" , "attention" )
if "W_q" in orig_key:
_lowerCAmelCase : Union[str, Any] = orig_key.replace("W_q" , "self.query" )
if "W_k" in orig_key:
_lowerCAmelCase : Any = orig_key.replace("W_k" , "self.key" )
if "W_v" in orig_key:
_lowerCAmelCase : Union[str, Any] = orig_key.replace("W_v" , "self.value" )
if "ff1" in orig_key:
_lowerCAmelCase : List[str] = orig_key.replace("ff1" , "intermediate.dense" )
if "ff2" in orig_key:
_lowerCAmelCase : Optional[Any] = orig_key.replace("ff2" , "output.dense" )
if "ff" in orig_key:
_lowerCAmelCase : List[Any] = orig_key.replace("ff" , "output.dense" )
if "mlm_class" in orig_key:
_lowerCAmelCase : Dict = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
if "mlm" in orig_key:
_lowerCAmelCase : Any = orig_key.replace("mlm" , "cls.predictions.transform" )
if "cls" not in orig_key:
_lowerCAmelCase : Any = "yoso." + orig_key
return orig_key
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
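    # rename checkpoint keys to the HF YOSO layout, drop pooler / sentence-classification weights,
    # and rebuild the decoder bias and position ids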
for key in orig_state_dict.copy().keys():
_lowerCAmelCase : List[Any] = orig_state_dict.pop(_lowerCamelCase )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
_lowerCAmelCase : Any = val
_lowerCAmelCase : Any = orig_state_dict["cls.predictions.decoder.bias"]
_lowerCAmelCase : Optional[int] = torch.arange(_lowerCamelCase ).expand((1, -1) ) + 2
return orig_state_dict
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" )["model_state_dict"]
_lowerCAmelCase : Optional[int] = YosoConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : List[str] = YosoForMaskedLM(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = convert_checkpoint_helper(config.max_position_embeddings , _lowerCamelCase )
print(model.load_state_dict(_lowerCamelCase ) )
model.eval()
model.save_pretrained(_lowerCamelCase )
print(F"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
_snake_case = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'roc_bert'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=0, __a="absolute", __a=None, __a=True, __a=True, __a=768, __a=910, __a=512, __a=2_4858, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=__a, **__a)
| 658 | 1 |
from __future__ import annotations
from random import random
class UpperCAmelCase_ :
def __init__( self, __a = None):
'''simple docstring'''
_lowerCAmelCase : Tuple = value
_lowerCAmelCase : Dict = random()
_lowerCAmelCase : Node | None = None
_lowerCAmelCase : Node | None = None
def __repr__( self):
'''simple docstring'''
from pprint import pformat
if self.left is None and self.right is None:
return f"'{self.value}: {self.prior:.5}'"
else:
return pformat(
{f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : int = str(self.value) + " "
_lowerCAmelCase : Optional[Any] = str(self.left or "")
_lowerCAmelCase : List[str] = str(self.right or "")
return value + left + right
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
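    # split the treap into (left, right): left keeps keys <= value, right keeps keys > value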
if root is None: # None tree is split into 2 Nones
return None, None
elif root.value is None:
return None, None
else:
if value < root.value:
_lowerCAmelCase , _lowerCAmelCase : Dict = split(root.left , _lowerCamelCase )
return left, root
else:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = split(root.right , _lowerCamelCase )
return root, right
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
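    # merge two treaps, assuming every key in left is <= every key in right; the node with the smaller priority becomes the new root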
if (not left) or (not right): # If one node is None, return the other
return left or right
elif left.prior < right.prior:
_lowerCAmelCase : Optional[int] = merge(left.right , _lowerCamelCase )
return left
else:
_lowerCAmelCase : str = merge(_lowerCamelCase , right.left )
return right
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = Node(_lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : List[Any] = split(_lowerCamelCase , _lowerCamelCase )
return merge(merge(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = split(_lowerCamelCase , value - 1 )
_lowerCAmelCase , _lowerCAmelCase : List[str] = split(_lowerCamelCase , _lowerCamelCase )
return merge(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
if not root: # None
return
else:
inorder(root.left )
print(root.value , end="," )
inorder(root.right )
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for arg in args.split():
if arg[0] == "+":
_lowerCAmelCase : int = insert(_lowerCamelCase , int(arg[1:] ) )
elif arg[0] == "-":
_lowerCAmelCase : Dict = erase(_lowerCamelCase , int(arg[1:] ) )
else:
print("Unknown command" )
return root
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = None
print(
"enter numbers to create a tree, + value to add value into treap, "
"- value to erase all nodes with value. 'q' to quit. " )
_lowerCAmelCase : List[Any] = input()
while args != "q":
_lowerCAmelCase : Dict = interact_treap(_lowerCamelCase , _lowerCamelCase )
print(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = input()
print("good by!" )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 658 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
from __future__ import annotations
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
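    # enumerate the eight L-shaped knight moves from position and keep those inside the n x n board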
_lowerCAmelCase , _lowerCAmelCase : Any = position
_lowerCAmelCase : Union[str, Any] = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
_lowerCAmelCase : Optional[Any] = []
for position in positions:
_lowerCAmelCase , _lowerCAmelCase : str = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(_lowerCamelCase )
return permissible_positions
def A ( _lowerCamelCase ):
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
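    # depth-first backtracking: try each valid knight move onto an unvisited square, undoing the move on failure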
if is_complete(_lowerCamelCase ):
return True
for position in get_valid_pos(_lowerCamelCase , len(_lowerCamelCase ) ):
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = position
if board[y][x] == 0:
_lowerCAmelCase : Any = curr + 1
if open_knight_tour_helper(_lowerCamelCase , _lowerCamelCase , curr + 1 ):
return True
_lowerCAmelCase : List[str] = 0
return False
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [[0 for i in range(_lowerCamelCase )] for j in range(_lowerCamelCase )]
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = 1
if open_knight_tour_helper(_lowerCamelCase , (i, j) , 1 ):
return board
_lowerCAmelCase : List[str] = 0
_lowerCAmelCase : List[Any] = F"Open Kight Tour cannot be performed on a board of size {n}"
raise ValueError(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 |
def A ( _lowerCamelCase ):
'''simple docstring'''
    if length <= 0 or not isinstance(_lowerCamelCase , int ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 658 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {"vocab_file": "sentencepiece.model"}
_snake_case = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
}
_snake_case = {
"google/rembert": 256,
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = VOCAB_FILES_NAMES
lowerCamelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self, __a, __a=False, __a=True, __a=True, __a="[CLS]", __a="[SEP]", __a="[UNK]", __a="[SEP]", __a="[PAD]", __a="[CLS]", __a="[MASK]", **__a, ):
'''simple docstring'''
super().__init__(
do_lower_case=__a, remove_space=__a, keep_accents=__a, bos_token=__a, eos_token=__a, unk_token=__a, sep_token=__a, pad_token=__a, cls_token=__a, mask_token=__a, **__a, )
_lowerCAmelCase : str = do_lower_case
_lowerCAmelCase : Union[str, Any] = remove_space
_lowerCAmelCase : Any = keep_accents
_lowerCAmelCase : Any = vocab_file
_lowerCAmelCase : List[Any] = spm.SentencePieceProcessor()
self.sp_model.Load(__a)
@property
def snake_case__ ( self):
'''simple docstring'''
return len(self.sp_model)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = {self.convert_ids_to_tokens(__a): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = d
_lowerCAmelCase : str = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file)
def snake_case__ ( self, __a, __a=False):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.sp_model.EncodeAsPieces(__a)
return pieces
def snake_case__ ( self, __a):
'''simple docstring'''
return self.sp_model.PieceToId(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
return self.sp_model.IdToPiece(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.sp_model.decode_pieces(__a)
return out_string
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : Tuple = [self.sep_token_id]
_lowerCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def snake_case__ ( self, __a, __a = None, __a = False):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model.")
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__a)) + [1] + ([0] * len(__a)) + [1]
return [1] + ([0] * len(__a)) + [1]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : int = [self.sep_token_id]
_lowerCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
if not os.path.isdir(__a):
logger.error("Vocabulary path ({}) should be a directory".format(__a))
return
_lowerCAmelCase : Union[str, Any] = os.path.join(
__a, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
if os.path.abspath(self.vocab_file) != os.path.abspath(__a):
copyfile(self.vocab_file, __a)
return (out_vocab_file,)
| 658 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( _lowerCamelCase ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
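    # within-class scatter: sum the covariance of each class around its own mean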
_lowerCAmelCase : Any = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = features[:, labels == i]
_lowerCAmelCase : Dict = data.mean(1 )
# Centralize the data of class i
_lowerCAmelCase : Union[str, Any] = data - column_reshape(_lowerCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : int = np.dot(_lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
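    # between-class scatter: size-weighted outer products of (class mean - overall mean)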
_lowerCAmelCase : Optional[Any] = features.mean(1 )
_lowerCAmelCase : List[str] = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = features[:, labels == i]
_lowerCAmelCase : Optional[Any] = data.shape[1]
_lowerCAmelCase : Optional[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
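    # center the data, take the top `dimensions` eigenvectors of its covariance matrix, and project onto them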
if features.any():
_lowerCAmelCase : List[Any] = features.mean(1 )
# Center the dataset
_lowerCAmelCase : List[Any] = features - np.reshape(_lowerCamelCase , (data_mean.size, 1) )
_lowerCAmelCase : Optional[Any] = np.dot(_lowerCamelCase , centered_data.T ) / features.shape[1]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = np.linalg.eigh(_lowerCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCAmelCase : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCAmelCase : List[Any] = np.dot(filtered_eigenvectors.T , _lowerCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
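    # generalized eigenproblem of between-class vs. within-class scatter; an SVD of the leading eigenvectors gives the projection basis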
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
_lowerCAmelCase , _lowerCAmelCase : List[str] = eigh(
covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
_lowerCAmelCase : List[str] = eigenvectors[:, ::-1][:, :dimensions]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = np.linalg.svd(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = svd_matrix[:, 0:dimensions]
_lowerCAmelCase : str = np.dot(filtered_svd_matrix.T , _lowerCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCAmelCase : List[Any] = np.array([0, 0, 0, 1, 1] )
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Union[str, Any] = linear_discriminant_analysis(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : List[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Tuple = principal_component_analysis(_lowerCamelCase , _lowerCamelCase )
if not np.allclose(_lowerCamelCase , _lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
_snake_case = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
_snake_case = ["a", "b", "c", "d", "e"]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = start
# add current to visited
visited.append(_lowerCamelCase )
_lowerCAmelCase : str = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
_lowerCAmelCase : Optional[int] = topological_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# if all neighbors visited add current to sort
sort.append(_lowerCamelCase )
# if all vertices haven't been visited select a new one to visit
if len(_lowerCamelCase ) != len(_lowerCamelCase ):
for vertice in vertices:
if vertice not in visited:
_lowerCAmelCase : Tuple = topological_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# return sort
return sort
if __name__ == "__main__":
_snake_case = topological_sort("a", [], [])
print(sort)
| 658 |
import requests
from bs4 import BeautifulSoup
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
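    # the third anchor in the result footer ("gs_fl") holds the "Cited by N" text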
_lowerCAmelCase : List[str] = BeautifulSoup(requests.get(_lowerCamelCase , params=_lowerCamelCase ).content , "html.parser" )
_lowerCAmelCase : Any = soup.find("div" , attrs={"class": "gs_ri"} )
_lowerCAmelCase : str = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_snake_case = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 658 | 1 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
_snake_case = trt.Logger(trt.Logger.WARNING)
_snake_case = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
_snake_case = logging.getLogger(__name__)
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--preprocessing_num_workers", type=int, default=4, help="A csv or a json file containing the training data."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
_snake_case = parser.parse_args()
if args.tokenizer_name:
_snake_case = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
logger.info("Training/evaluation parameters %s", args)
_snake_case = args.per_device_eval_batch_size
_snake_case = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
_snake_case = True
_snake_case = "temp_engine/bert-fp32.engine"
if args.fp16:
    _snake_case = "temp_engine/bert-fp16.engine"
if args.int8:
    _snake_case = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("temp_engine"):
os.makedirs("temp_engine")
_snake_case = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, "rb") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
_snake_case = [network.get_input(i) for i in range(network.num_inputs)]
_snake_case = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
_snake_case = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
_snake_case = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
_snake_case = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, "wb") as f:
f.write(engine.serialize())
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = np.asarray(inputs["input_ids"] , dtype=np.intaa )
_lowerCAmelCase : List[str] = np.asarray(inputs["attention_mask"] , dtype=np.intaa )
_lowerCAmelCase : Dict = np.asarray(inputs["token_type_ids"] , dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , _lowerCamelCase )
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _lowerCamelCase )
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _lowerCamelCase )
# start time
_lowerCAmelCase : Any = time.time()
# Run inference
context.execute_async(
bindings=[int(_lowerCamelCase ) for d_inp in d_inputs] + [int(_lowerCamelCase ), int(_lowerCamelCase )] , stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
cuda.memcpy_dtoh_async(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Synchronize the stream and take time
stream.synchronize()
# end time
_lowerCAmelCase : Union[str, Any] = time.time()
_lowerCAmelCase : Any = end_time - start_time
_lowerCAmelCase : List[Any] = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
_snake_case = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_snake_case = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
_snake_case = raw_datasets["validation"].column_names
_snake_case = "question" if "question" in column_names else column_names[0]
_snake_case = "context" if "context" in column_names else column_names[1]
_snake_case = "answers" if "answers" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
_snake_case = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'''
)
_snake_case = min(args.max_seq_length, tokenizer.model_max_length)
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : str = [q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possibly giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
_lowerCAmelCase : Union[str, Any] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="only_second" if pad_on_right else "only_first" , max_length=_lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=_lowerCamelCase , return_offsets_mapping=_lowerCamelCase , padding="max_length" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
_lowerCAmelCase : List[Any] = tokenized_examples.pop("overflow_to_sample_mapping" )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
_lowerCAmelCase : str = []
for i in range(len(tokenized_examples["input_ids"] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
_lowerCAmelCase : Optional[int] = tokenized_examples.sequence_ids(_lowerCamelCase )
_lowerCAmelCase : str = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
_lowerCAmelCase : Any = sample_mapping[i]
tokenized_examples["example_id"].append(examples["id"][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
_lowerCAmelCase : int = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["offset_mapping"][i] )
]
return tokenized_examples
_snake_case = raw_datasets["validation"]
# Validation Feature Creation
_snake_case = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
_snake_case = default_data_collator
_snake_case = eval_dataset.remove_columns(["example_id", "offset_mapping"])
_snake_case = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="eval" ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = postprocess_qa_predictions(
examples=_lowerCamelCase , features=_lowerCamelCase , predictions=_lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
_lowerCAmelCase : Any = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
]
else:
_lowerCAmelCase : Any = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
_lowerCAmelCase : int = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_lowerCamelCase , label_ids=_lowerCamelCase )
_snake_case = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def A ( _lowerCamelCase ):
'''simple docstring'''
return trt.volume(engine.get_binding_shape(_lowerCamelCase ) ) * engine.get_binding_dtype(_lowerCamelCase ).itemsize
# Allocate device memory for inputs and outputs.
_snake_case = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    _snake_case = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    _snake_case = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
_snake_case = cuda.mem_alloc(h_outputa.nbytes)
_snake_case = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
_snake_case = cuda.Stream()
# Evaluation
logger.info("***** Running Evaluation *****")
logger.info(f''' Num examples = {len(eval_dataset)}''')
logger.info(f''' Batch size = {args.per_device_eval_batch_size}''')
_snake_case = 0.0
_snake_case = 0
_snake_case = timeit.default_timer()
_snake_case = None
for step, batch in enumerate(eval_dataloader):
_snake_case, _snake_case = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
_snake_case, _snake_case = outputs
_snake_case = torch.tensor(start_logits)
_snake_case = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
_snake_case = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
_snake_case = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
_snake_case = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
_snake_case = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
_snake_case = nested_truncate(all_preds, len(eval_dataset))
_snake_case = timeit.default_timer() - start_time
logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
logger.info("Total Number of Inference = %d", niter)
_snake_case = post_processing_function(eval_examples, eval_dataset, all_preds)
_snake_case = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f'''Evaluation metrics: {eval_metric}''')
| 658 |
def A ( _lowerCamelCase = 1_000_000 ):
'''simple docstring'''
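    # find the start value below the limit with the longest Collatz chain; chain lengths are memoized in counters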
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[Any] = 1
_lowerCAmelCase : List[str] = {1: 1}
for inputa in range(2 , _lowerCamelCase ):
_lowerCAmelCase : int = 0
_lowerCAmelCase : Any = inputa
while True:
if number in counters:
counter += counters[number]
break
if number % 2 == 0:
number //= 2
counter += 1
else:
_lowerCAmelCase : Any = (3 * number) + 1
counter += 1
if inputa not in counters:
_lowerCAmelCase : Tuple = counter
if counter > pre_counter:
_lowerCAmelCase : Union[str, Any] = inputa
_lowerCAmelCase : Union[str, Any] = counter
return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
| 658 | 1 |
import re
def A ( _lowerCamelCase ):
'''simple docstring'''
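    # optional prefix (0, 94, +94 or 0094), a 7x mobile code, an optional separator, then seven digits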
_lowerCAmelCase : Optional[int] = re.compile(
r"^(?:0|94|\+94|0{2}94)" r"7(0|1|2|4|5|6|7|8)" r"(-| |)" r"\d{7}$" )
return bool(re.search(_lowerCamelCase , _lowerCamelCase ) )
if __name__ == "__main__":
_snake_case = "0094702343221"
print(is_sri_lankan_phone_number(phone))
| 658 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
_snake_case = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def A ( _lowerCamelCase ):
'''simple docstring'''
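    # map original Jukebox checkpoint key names onto the HF JukeboxModel naming scheme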
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.1.bias" , ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Optional[int] = key.replace(".model.1.weight" , ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : Union[str, Any] = key.replace(".model.3.bias" , ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
_lowerCAmelCase : int = key.replace(".model.3.weight" , ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
_lowerCAmelCase : List[str] = key.replace("conditioner_blocks.0" , "conditioner_blocks" )
if "prime_prior" in key:
_lowerCAmelCase : int = key.replace("prime_prior" , "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
_lowerCAmelCase : int = key.replace(".emb." , "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k" , ".codebook" )
if "y_emb." in key:
return key.replace("y_emb." , "metadata_embedding." )
if "x_emb.emb." in key:
_lowerCAmelCase : Tuple = key.replace("0.x_emb.emb" , "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln" , ".layer_norm" )
if "_ln" in key:
return key.replace("_ln" , "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj" , "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out" , "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out" , "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb" , "embed_tokens" )
return key
def fix_jukebox_keys( state_dict , model_state_dict , key_prefix , mapping ):
    '''simple docstring'''
    new_dict = {}
    import re

    re_encoder_block_conv_in = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    re_encoder_block_resnet = re.compile(
        r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_encoder_block_proj_out = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )

    re_decoder_block_conv_out = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
    re_decoder_block_resnet = re.compile(
        r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_decoder_block_proj_in = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )

    re_prior_cond_conv_out = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
    re_prior_cond_resnet = re.compile(
        r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
    re_prior_cond_proj_in = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )

    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key ):
            regex_match = re_encoder_block_conv_in.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key , original_key )

        elif re_encoder_block_resnet.fullmatch(original_key ):
            regex_match = re_encoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] )
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key , original_key )

        elif re_encoder_block_proj_out.fullmatch(original_key ):
            regex_match = re_encoder_block_proj_out.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key , original_key )

        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key ):
            regex_match = re_decoder_block_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key , original_key )

        elif re_decoder_block_resnet.fullmatch(original_key ):
            regex_match = re_decoder_block_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[2] ) * 2 + int(groups[3] ) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key , original_key )

        elif re_decoder_block_proj_in.fullmatch(original_key ):
            regex_match = re_decoder_block_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key , original_key )

        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key ):
            regex_match = re_prior_cond_conv_out.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key , original_key )

        elif re_prior_cond_resnet.fullmatch(original_key ):
            regex_match = re_prior_cond_resnet.match(original_key )
            groups = regex_match.groups()
            block_index = int(groups[1] ) * 2 + int(groups[2] ) - 2
            conv_index = {"1": 1, "3": 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key , original_key )

        elif re_prior_cond_proj_in.fullmatch(original_key ):
            regex_match = re_prior_cond_proj_in.match(original_key )
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key , original_key )

        # keep original key
        else:
            key = original_key

        key = replace_key(key )

        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match" )

        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and {value.shape}, do not match" )
            key = original_key

        mapping[key] = original_key
        new_dict[key] = value

    return new_dict
@torch.no_grad()
def convert_openai_checkpoint( model_name=None , pytorch_dump_folder_path=None ):
    '''simple docstring'''
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
            r = requests.get(f"{PREFIX}{file}" , allow_redirects=True )
            os.makedirs(f"{pytorch_dump_folder_path}/" , exist_ok=True )
            open(f"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )

    model_to_convert = MODEL_MAPPING[model_name.split("/" )[-1]]

    config = JukeboxConfig.from_pretrained(model_name )
    model = JukeboxModel(config )

    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert ):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]

        new_dic = {}
        for k in old_dic.keys():
            # expand the shorthand ".b"/".w" suffixes used by the OpenAI checkpoints
            if k.endswith(".b" ):
                new_dic[k.replace("b" , "bias" )] = old_dic[k]
            elif k.endswith(".w" ):
                new_dic[k.replace("w" , "weight" )] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace(".blocks." , ".model." )] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]

        key_prefix = "vqvae" if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic , model.state_dict() , key_prefix , mapping )
        weight_dict.append(new_dic )

    vqvae_state_dict = weight_dict.pop(0 )
    model.vqvae.load_state_dict(vqvae_state_dict )
    for i in range(len(weight_dict ) ):
        model.priors[i].load_state_dict(weight_dict[2 - i] )

    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    with open(f"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
        json.dump(mapping , txtfile )

    print(f"Saving model {model_name} to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )

    return weight_dict
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
    args = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
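# Example invocation (the output folder name is just illustrative):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#       --pytorch_dump_folder_path jukebox-1b-lyrics-converted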
| 658 | 1 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)
class UpperCAmelCase_ ( a):
def __init__( self, __a=-1):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = label_idx
def snake_case__ ( self, __a, __a):
'''simple docstring'''
if isinstance(__a, __a):
_lowerCAmelCase : List[Any] = mode.value
_lowerCAmelCase : Dict = os.path.join(__a, f"{mode}.txt")
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Optional[int] = []
with open(__a, encoding="utf-8") as f:
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : Dict = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}", words=__a, labels=__a))
guid_index += 1
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Dict = []
else:
_lowerCAmelCase : List[Any] = line.split(" ")
words.append(splits[0])
if len(__a) > 1:
labels.append(splits[self.label_idx].replace("\n", ""))
else:
# Examples could have no label for mode = "test"
labels.append("O")
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}", words=__a, labels=__a))
return examples
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
writer.write(__a)
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
_lowerCAmelCase : Dict = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
writer.write(__a)
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])
def snake_case__ ( self, __a):
'''simple docstring'''
if path:
with open(__a, "r") as f:
_lowerCAmelCase : Optional[Any] = f.read().splitlines()
if "O" not in labels:
_lowerCAmelCase : Optional[int] = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class UpperCAmelCase_ ( a):
def __init__( self):
'''simple docstring'''
super().__init__(label_idx=-2)
def snake_case__ ( self, __a):
'''simple docstring'''
if path:
with open(__a, "r") as f:
_lowerCAmelCase : Any = f.read().splitlines()
if "O" not in labels:
_lowerCAmelCase : List[str] = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class UpperCAmelCase_ ( a):
def snake_case__ ( self, __a, __a):
'''simple docstring'''
if isinstance(__a, __a):
_lowerCAmelCase : Optional[Any] = mode.value
_lowerCAmelCase : Optional[int] = os.path.join(__a, f"{mode}.txt")
_lowerCAmelCase : Optional[int] = 1
_lowerCAmelCase : Tuple = []
with open(__a, encoding="utf-8") as f:
for sentence in parse_incr(__a):
_lowerCAmelCase : int = []
_lowerCAmelCase : Dict = []
for token in sentence:
words.append(token["form"])
labels.append(token["upos"])
assert len(__a) == len(__a)
if words:
examples.append(InputExample(guid=f"{mode}-{guid_index}", words=__a, labels=__a))
guid_index += 1
return examples
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = 0
for sentence in parse_incr(__a):
_lowerCAmelCase : Dict = preds_list[example_id]
_lowerCAmelCase : Tuple = ""
for token in sentence:
out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
out += "\n"
writer.write(__a)
example_id += 1
def snake_case__ ( self, __a):
'''simple docstring'''
if path:
with open(__a, "r") as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
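# The NER/chunking readers above consume CoNLL-style column data, one
# space-separated token per line, e.g.
#   U.N. NNP I-NP I-ORG
#   official NN I-NP O
# with blank lines (or -DOCSTART-) separating sentences; the last class reads
# CoNLL-U files through parse_incr instead.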
| 658 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel( ksize , sigma , theta , lambd , gamma , psi ):
    '''simple docstring'''
    # prepare kernel: the kernel size has to be odd
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float32 )

    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )

    return gabor
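# For reference, each kernel entry evaluates the real Gabor function
#   g(x, y) = exp(-(x'^2 + gamma^2 * y'^2) / (2 * sigma^2)) * cos(2*pi*x'/lambd + psi)
# with rotated coordinates x' = x*cos(theta) + y*sin(theta) and
# y' = -x*sin(theta) + y*cos(theta).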
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_10 = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_10)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 11x11 mask and 6 directions", out)
    waitKey(0)
| 658 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_snake_case = {
"BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json",
"BridgeTower/bridgetower-base-itm-mlm": (
"https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'bridgetower_vision_model'
def __init__( self, __a=768, __a=12, __a=3, __a=16, __a=288, __a=1, __a=1E-05, __a=False, __a=True, __a=False, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : int = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Any = image_size
_lowerCAmelCase : Optional[Any] = initializer_factor
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : Dict = stop_gradient
_lowerCAmelCase : List[Any] = share_layernorm
_lowerCAmelCase : Tuple = remove_last_layer
@classmethod
def snake_case__ ( cls, __a, **__a):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = cls.get_config_dict(__a, **__a)
if config_dict.get("model_type") == "bridgetower":
_lowerCAmelCase : Dict = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(__a, **__a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'bridgetower_text_model'
def __init__( self, __a=5_0265, __a=768, __a=12, __a=12, __a=1, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=514, __a=1, __a=1E-05, __a=1, __a=0, __a=2, __a="absolute", __a=True, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = vocab_size
_lowerCAmelCase : Tuple = hidden_size
_lowerCAmelCase : Union[str, Any] = num_hidden_layers
_lowerCAmelCase : Optional[Any] = num_attention_heads
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Any = initializer_factor
_lowerCAmelCase : Tuple = intermediate_size
_lowerCAmelCase : Dict = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Optional[int] = max_position_embeddings
_lowerCAmelCase : Any = type_vocab_size
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Any = use_cache
_lowerCAmelCase : Tuple = pad_token_id
_lowerCAmelCase : Any = bos_token_id
_lowerCAmelCase : int = eos_token_id
@classmethod
def snake_case__ ( cls, __a, **__a):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = cls.get_config_dict(__a, **__a)
if config_dict.get("model_type") == "bridgetower":
_lowerCAmelCase : Optional[int] = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
return cls.from_dict(__a, **__a)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'bridgetower'
def __init__( self, __a=True, __a="gelu", __a=768, __a=1, __a=1E-05, __a=False, __a="add", __a=12, __a=6, __a=False, __a=False, __a=None, __a=None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : List[str] = kwargs.pop("text_config_dict", __a)
_lowerCAmelCase : Any = kwargs.pop("vision_config_dict", __a)
super().__init__(**__a)
_lowerCAmelCase : Optional[Any] = share_cross_modal_transformer_layers
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : Tuple = initializer_factor
_lowerCAmelCase : Tuple = layer_norm_eps
_lowerCAmelCase : Optional[int] = share_link_tower_layers
_lowerCAmelCase : Any = link_tower_type
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = num_hidden_layers
_lowerCAmelCase : str = tie_word_embeddings
_lowerCAmelCase : Tuple = init_layernorm_from_vision_encoder
if text_config is None:
_lowerCAmelCase : str = {}
logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.")
if vision_config is None:
_lowerCAmelCase : int = {}
logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.")
_lowerCAmelCase : Any = BridgeTowerTextConfig(**__a)
_lowerCAmelCase : List[str] = BridgeTowerVisionConfig(**__a)
@classmethod
def snake_case__ ( cls, __a, __a, **__a):
'''simple docstring'''
return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = copy.deepcopy(self.__dict__)
_lowerCAmelCase : Tuple = self.text_config.to_dict()
_lowerCAmelCase : Optional[int] = self.vision_config.to_dict()
_lowerCAmelCase : int = self.__class__.model_type
return output
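# Composition sketch (method names assumed to follow the upstream
# transformers API for this model):
#   text_cfg = BridgeTowerTextConfig()
#   vision_cfg = BridgeTowerVisionConfig()
#   config = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)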
| 658 |
def binary_insertion_sort( collection ):
    '''simple docstring'''
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
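# Quick sanity check:
#   binary_insertion_sort([5, 2, 4, 1, 3]) -> [1, 2, 3, 4, 5]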
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
| 658 | 1 |
import base64


def base85_encode( string ):
    '''simple docstring'''
    # encode the input to bytes, then Ascii85-encode it
    return base64.a85encode(string.encode("utf-8" ) )


def base85_decode( a85encoded ):
    '''simple docstring'''
    # Ascii85-decode the input and turn the bytes back into a string
    return base64.a85decode(a85encoded ).decode("utf-8" )
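# Round-trip sanity check (illustrative):
#   assert base85_decode(base85_encode("some text")) == "some text"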
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
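# Minimal usage sketch (keyword names as in this __init__; the class is
# FocalNetConfig upstream):
#   config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2], out_features=["stage4"])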
| 658 | 1 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a=4, __a=False):
'''simple docstring'''
_lowerCAmelCase : List[str] = compute_bleu(
reference_corpus=__a, translation_corpus=__a, max_order=__a, smooth=__a)
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
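# For reference, corpus BLEU combines the n-gram precisions p_n as
#   BLEU = BP * exp(sum_n w_n * log p_n),  BP = min(1, exp(1 - r/c)),
# where c and r are the candidate and reference corpus lengths.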
| 658 |
def combination_sum_iv( n , array , target ):
    '''simple docstring'''

    def count_of_possible_combinations(target ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item ) for item in array )

    return count_of_possible_combinations(target )


def combination_sum_iv_dp_array( n , array , target ):
    '''simple docstring'''

    def count_of_possible_combinations_with_dp_array(
        target , dp_array ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item , dp_array )
            for item in array )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target , dp_array )


def combination_sum_iv_bottom_up( n , array , target ):
    '''simple docstring'''
    dp_array = [0] * (target + 1)
    dp_array[0] = 1

    for i in range(1 , target + 1 ):
        for j in range(n ):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]

    return dp_array[target]
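# Worked example: for array [1, 2, 5] and target 5 the bottom-up table fills
# as dp_array = [1, 1, 2, 3, 5, 9], i.e. 9 ordered combinations sum to 5.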
if __name__ == "__main__":
import doctest
doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 658 | 1 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
_snake_case = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
_snake_case = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
_snake_case = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def normalize_answer( s ):
    '''simple docstring'''

    def remove_articles(text ):
        regex = re.compile(r"\b(a|an|the)\b" , re.UNICODE )
        return re.sub(regex , " " , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s ) ) ) )
def compute_exact( a_gold , a_pred ):
    '''simple docstring'''
    return int(normalize_answer(a_gold ) == normalize_answer(a_pred ) )
def compute_em( predictions , references ):
    '''simple docstring'''
    scores = [any(compute_exact(ref , pred ) for ref in refs ) for pred, refs in zip(predictions , references )]
    return (sum(scores ) / len(scores )) * 100
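# e.g. compute_em(predictions=["a b"], references=[["a b", "c"]]) -> 100.0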
def SARIngram( sgrams , cgrams , rgramslist , numref ):
    '''simple docstring'''
_lowerCAmelCase : str = [rgram for rgrams in rgramslist for rgram in rgrams]
_lowerCAmelCase : Optional[Any] = Counter(_lowerCamelCase )
_lowerCAmelCase : List[Any] = Counter(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = Counter()
for sgram, scount in sgramcounter.items():
_lowerCAmelCase : str = scount * numref
_lowerCAmelCase : str = Counter(_lowerCamelCase )
_lowerCAmelCase : str = Counter()
for cgram, ccount in cgramcounter.items():
_lowerCAmelCase : Dict = ccount * numref
# KEEP
_lowerCAmelCase : Dict = sgramcounter_rep & cgramcounter_rep
_lowerCAmelCase : str = keepgramcounter_rep & rgramcounter
_lowerCAmelCase : Tuple = sgramcounter_rep & rgramcounter
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : List[Any] = 0
for keepgram in keepgramcountergood_rep:
keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
# Fix an alleged bug [2] in the keep score computation.
# keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
keeptmpscorea += keepgramcountergood_rep[keepgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : str = 1
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : Optional[Any] = keeptmpscorea / len(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
# Fix an alleged bug [2] in the keep score computation.
# keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
_lowerCAmelCase : int = keeptmpscorea / sum(keepgramcounterall_rep.values() )
_lowerCAmelCase : str = 0
if keepscore_precision > 0 or keepscore_recall > 0:
_lowerCAmelCase : Dict = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
# DELETION
_lowerCAmelCase : List[Any] = sgramcounter_rep - cgramcounter_rep
_lowerCAmelCase : List[str] = delgramcounter_rep - rgramcounter
_lowerCAmelCase : Any = sgramcounter_rep - rgramcounter
_lowerCAmelCase : Tuple = 0
_lowerCAmelCase : Optional[int] = 0
for delgram in delgramcountergood_rep:
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase : Optional[Any] = 1
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : List[str] = deltmpscorea / len(_lowerCamelCase )
# ADDITION
_lowerCAmelCase : List[Any] = set(_lowerCamelCase ) - set(_lowerCamelCase )
_lowerCAmelCase : Dict = set(_lowerCamelCase ) & set(_lowerCamelCase )
_lowerCAmelCase : str = set(_lowerCamelCase ) - set(_lowerCamelCase )
_lowerCAmelCase : int = 0
for addgram in addgramcountergood:
addtmpscore += 1
# Define 0/0=1 instead of 0 to give higher scores for predictions that match
# a target exactly.
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Dict = 1
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : Any = addtmpscore / len(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : Any = addtmpscore / len(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = 0
if addscore_precision > 0 or addscore_recall > 0:
_lowerCAmelCase : Optional[int] = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
return (keepscore, delscore_precision, addscore)
def SARIsent( ssent , csent , rsents ):
    '''simple docstring'''
_lowerCAmelCase : List[str] = len(_lowerCamelCase )
_lowerCAmelCase : Tuple = ssent.split(" " )
_lowerCAmelCase : List[Any] = csent.split(" " )
_lowerCAmelCase : Optional[Any] = []
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = []
_lowerCAmelCase : str = []
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : int = []
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Optional[Any] = []
for rsent in rsents:
_lowerCAmelCase : Dict = rsent.split(" " )
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Dict = []
_lowerCAmelCase : Tuple = []
ragramslist.append(_lowerCamelCase )
for i in range(0 , len(_lowerCamelCase ) - 1 ):
if i < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Optional[Any] = ragrams[i] + " " + ragrams[i + 1]
ragrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 2:
_lowerCAmelCase : Union[str, Any] = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2]
ragrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 3:
_lowerCAmelCase : Optional[int] = ragrams[i] + " " + ragrams[i + 1] + " " + ragrams[i + 2] + " " + ragrams[i + 3]
ragrams.append(_lowerCamelCase )
ragramslist.append(_lowerCamelCase )
ragramslist.append(_lowerCamelCase )
ragramslist.append(_lowerCamelCase )
for i in range(0 , len(_lowerCamelCase ) - 1 ):
if i < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : List[str] = sagrams[i] + " " + sagrams[i + 1]
sagrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 2:
_lowerCAmelCase : Union[str, Any] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2]
sagrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 3:
_lowerCAmelCase : Union[str, Any] = sagrams[i] + " " + sagrams[i + 1] + " " + sagrams[i + 2] + " " + sagrams[i + 3]
sagrams.append(_lowerCamelCase )
for i in range(0 , len(_lowerCamelCase ) - 1 ):
if i < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = cagrams[i] + " " + cagrams[i + 1]
cagrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 2:
_lowerCAmelCase : List[Any] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2]
cagrams.append(_lowerCamelCase )
if i < len(_lowerCamelCase ) - 3:
_lowerCAmelCase : Optional[int] = cagrams[i] + " " + cagrams[i + 1] + " " + cagrams[i + 2] + " " + cagrams[i + 3]
cagrams.append(_lowerCamelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Optional[int] = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Any = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Optional[int] = SARIngram(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : str = sum([keepascore, keepascore, keepascore, keepascore] ) / 4
_lowerCAmelCase : str = sum([delascore, delascore, delascore, delascore] ) / 4
_lowerCAmelCase : int = sum([addascore, addascore, addascore, addascore] ) / 4
_lowerCAmelCase : List[Any] = (avgkeepscore + avgdelscore + avgaddscore) / 3
return finalscore
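# SARI is the mean of the keep, deletion and addition scores, each averaged
# over the 1- to 4-gram granularities computed by the SARIngram calls above.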
def normalize( sentence , lowercase = True , tokenizer = "13a" , return_str = True ):
    '''simple docstring'''
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        if version.parse(sacrebleu.__version__ ).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer )()(sentence )
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence )
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence , return_str=True , escape=False )
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence , return_str=True )
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent
def compute_sari( sources , predictions , references ):
    '''simple docstring'''
    if not (len(sources ) == len(predictions ) == len(references )):
        raise ValueError("Sources length must match predictions and references lengths." )
    sari_score = 0
    for src, pred, refs in zip(sources , predictions , references ):
        sari_score += SARIsent(normalize(src ) , normalize(pred ) , [normalize(sent ) for sent in refs] )
    sari_score = sari_score / len(predictions )
    return 100 * sari_score
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase="exp" , _lowerCamelCase=None , _lowerCamelCase=False , _lowerCamelCase=False , _lowerCamelCase=False , ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = len(references[0] )
if any(len(_lowerCamelCase ) != references_per_prediction for refs in references ):
raise ValueError("Sacrebleu requires the same number of references for each prediction" )
_lowerCAmelCase : str = [[refs[i] for refs in references] for i in range(_lowerCamelCase )]
_lowerCAmelCase : Dict = sacrebleu.corpus_bleu(
_lowerCamelCase , _lowerCamelCase , smooth_method=_lowerCamelCase , smooth_value=_lowerCamelCase , force=_lowerCamelCase , lowercase=_lowerCamelCase , use_effective_order=_lowerCamelCase , )
return output.score
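# Note: `references` arrives as one list of references per prediction; the
# list comprehension above transposes it into sacrebleu's expected layout of
# one list per reference position.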
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Value("string", id="sequence"),
"references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
}), codebase_urls=[
"https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py",
"https://github.com/cocoxu/simplification/blob/master/SARI.py",
"https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py",
"https://github.com/mjpost/sacreBLEU",
], reference_urls=[
"https://www.aclweb.org/anthology/Q16-1029.pdf",
"https://github.com/mjpost/sacreBLEU",
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = {}
result.update({"sari": compute_sari(sources=__a, predictions=__a, references=__a)})
result.update({"sacrebleu": compute_sacrebleu(predictions=__a, references=__a)})
result.update({"exact": compute_em(predictions=__a, references=__a)})
return result
| 658 |
import string


def decrypt( message ):
    '''simple docstring'''
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(F"Decryption using Key #{key}: {translated}" )


def main():
    '''simple docstring'''
    message = input("Encrypted message: " )
    message = message.upper()
    decrypt(message )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
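# Worked example: the ciphertext "GUVF" decrypts to "THIS" under key 13, so
# that line of the brute-force output reveals the plaintext.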
| 658 | 1 |
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search( left , right , array , target ):
    '''simple docstring'''
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search( array , target ):
    '''simple docstring'''
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search( left , right , array , target ):
    '''simple docstring'''
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
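# Each step keeps at most one third of the interval, so both variants run in
# O(log n) comparisons, falling back to a linear scan once the interval is
# narrower than `precision`.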
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f'''Iterative search: {target} found at positions: {result1}''')
        print(f'''Recursive search: {target} found at positions: {result2}''')
    else:
        print("Not found")
| 658 |
import requests
from bs4 import BeautifulSoup


def world_covid19_stats( url = "https://www.worldometers.info/coronavirus" ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    keys = soup.findAll("h1" )
    values = soup.findAll("div" , {"class": "maincounter-number"} )
    keys += soup.findAll("span" , {"class": "panel-title"} )
    values += soup.findAll("div" , {"class": "number-table-main"} )
    return {key.text.strip(): value.text.strip() for key, value in zip(keys , values )}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f'''{key}\n{value}\n''')
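# Each printed pair is a headline label followed by its live counter, e.g.
#   Coronavirus Cases:
#   <current worldwide total>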
| 658 | 1 |
def multiplication_table( number , number_of_terms ):
    '''simple docstring'''
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=10))
| 658 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
def __init__( self, __a, __a):
'''simple docstring'''
if len(__a) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1.")
_lowerCAmelCase : list[float] = list(__a)
_lowerCAmelCase : Any = degree
def __add__( self, __a):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_lowerCAmelCase : Dict = self.coefficients[:]
for i in range(polynomial_a.degree + 1):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree, __a)
else:
_lowerCAmelCase : Union[str, Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree, __a)
def __sub__( self, __a):
'''simple docstring'''
return self + polynomial_a * Polynomial(0, [-1])
def __neg__( self):
'''simple docstring'''
return Polynomial(self.degree, [-c for c in self.coefficients])
def __mul__( self, __a):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_a.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int | float = 0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ""
for i in range(self.degree, -1, -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(__a)
return polynomial
def __repr__( self):
'''simple docstring'''
return self.__str__()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * self.degree
for i in range(self.degree):
_lowerCAmelCase : List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1, __a)
def snake_case__ ( self, __a = 0):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + 2)
_lowerCAmelCase : Optional[Any] = constant
for i in range(self.degree + 1):
_lowerCAmelCase : Dict = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1, __a)
def __eq__( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self, __a):
'''simple docstring'''
return not self.__eq__(__a)
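# Usage sketch: coefficients are stored in increasing-degree order, so
#   p = Polynomial(2, [1, 0, 3])   # represents 1 + 3x^2
#   str(p) -> "3x^2 + 1"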
| 658 | 1 |
def check_bouncy( n ):
    '''simple docstring'''
    if not isinstance(n , int ):
        raise ValueError("check_bouncy() accepts only integer arguments" )
    str_n = str(n )
    sorted_str_n = "".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution( percent = 99 ):
    '''simple docstring'''
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100" )
    bouncy_num = 0
    num = 1

    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
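# Worked example: 101 is bouncy (its digits are neither monotonically
# increasing nor decreasing), and solution(50) == 538 per Project Euler 112.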
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f'''{solution(99)}''')
| 658 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'xlnet'
lowerCamelCase__ = ['mems']
lowerCamelCase__ = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=3_2000, __a=1024, __a=24, __a=16, __a=4096, __a="gelu", __a=True, __a="bi", __a=0.02, __a=1E-12, __a=0.1, __a=512, __a=None, __a=True, __a=False, __a=False, __a=-1, __a=False, __a="last", __a=True, __a="tanh", __a=0.1, __a=5, __a=5, __a=5, __a=1, __a=2, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Tuple = n_layer
_lowerCAmelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
_lowerCAmelCase : Optional[int] = d_model // n_head
_lowerCAmelCase : List[str] = ff_activation
_lowerCAmelCase : Tuple = d_inner
_lowerCAmelCase : List[Any] = untie_r
_lowerCAmelCase : List[str] = attn_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = dropout
_lowerCAmelCase : Optional[int] = mem_len
_lowerCAmelCase : Union[str, Any] = reuse_len
_lowerCAmelCase : List[str] = bi_data
_lowerCAmelCase : List[str] = clamp_len
_lowerCAmelCase : Any = same_length
_lowerCAmelCase : List[str] = summary_type
_lowerCAmelCase : int = summary_use_proj
_lowerCAmelCase : Optional[Any] = summary_activation
_lowerCAmelCase : Tuple = summary_last_dropout
_lowerCAmelCase : Union[str, Any] = start_n_top
_lowerCAmelCase : Optional[int] = end_n_top
_lowerCAmelCase : Tuple = bos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", __a, )
_lowerCAmelCase : Union[str, Any] = kwargs["use_cache"]
_lowerCAmelCase : Union[str, Any] = use_mems_eval
_lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
@property
def snake_case__ ( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
def snake_case__ ( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 1 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch(
    tf_checkpoint_path , bert_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    '''simple docstring'''
    # Initialise the PyTorch model
    config = XLNetConfig.from_json_file(bert_config_file )

    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )

    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {os.path.abspath(pytorch_config_dump_path )}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
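# After conversion the folder holds the weights under WEIGHTS_NAME
# ("pytorch_model.bin") and the config under CONFIG_NAME ("config.json").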
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 658 |
def price_plus_tax( price , tax_rate ):
    '''simple docstring'''
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class UpperCAmelCase_ ( a):
def __init__( self, __a=0.01, __a=1000):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = p_stop
_lowerCAmelCase : Dict = max_length
def __iter__( self):
'''simple docstring'''
_lowerCAmelCase : str = 0
_lowerCAmelCase : List[Any] = False
while not stop and count < self.max_length:
yield count
count += 1
_lowerCAmelCase : Tuple = random.random() < self.p_stop
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self, __a, __a, __a=False, __a=True):
'''simple docstring'''
_lowerCAmelCase : Dict = [
BatchSamplerShard(__a, 2, __a, split_batches=__a, even_batches=__a)
for i in range(2)
]
_lowerCAmelCase : Optional[Any] = [list(__a) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(__a) for shard in batch_sampler_shards], [len(__a) for e in expected])
self.assertListEqual(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = BatchSampler(range(24), batch_size=3, drop_last=__a)
_lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__a, __a)
_lowerCAmelCase : str = BatchSampler(range(24), batch_size=3, drop_last=__a)
# Expected shouldn't change
self.check_batch_sampler_shards(__a, __a)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(21), batch_size=3, drop_last=__a)
_lowerCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(__a, __a)
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(21), batch_size=3, drop_last=__a)
_lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a)
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCAmelCase : List[Any] = BatchSampler(range(22), batch_size=3, drop_last=__a)
_lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(__a, __a)
_lowerCAmelCase : Tuple = BatchSampler(range(22), batch_size=3, drop_last=__a)
_lowerCAmelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a)
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCAmelCase : int = BatchSampler(range(20), batch_size=3, drop_last=__a)
_lowerCAmelCase : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(__a, __a)
_lowerCAmelCase : List[Any] = BatchSampler(range(20), batch_size=3, drop_last=__a)
_lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a)
# Check the shards when the dataset is very small.
_lowerCAmelCase : str = BatchSampler(range(2), batch_size=3, drop_last=__a)
_lowerCAmelCase : str = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(__a, __a)
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(2), batch_size=3, drop_last=__a)
_lowerCAmelCase : Dict = [[], []]
self.check_batch_sampler_shards(__a, __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = BatchSampler(range(24), batch_size=4, drop_last=__a)
_lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
_lowerCAmelCase : Optional[int] = BatchSampler(range(24), batch_size=4, drop_last=__a)
# Expected shouldn't change
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(22), batch_size=4, drop_last=__a)
_lowerCAmelCase : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
_lowerCAmelCase : List[str] = BatchSampler(range(22), batch_size=4, drop_last=__a)
_lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCAmelCase : str = BatchSampler(range(21), batch_size=4, drop_last=__a)
_lowerCAmelCase : List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
_lowerCAmelCase : int = BatchSampler(range(21), batch_size=4, drop_last=__a)
_lowerCAmelCase : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
# Check the shards when the dataset is very small.
_lowerCAmelCase : Optional[int] = BatchSampler(range(2), batch_size=4, drop_last=__a)
_lowerCAmelCase : List[str] = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
_lowerCAmelCase : Any = BatchSampler(range(2), batch_size=4, drop_last=__a)
_lowerCAmelCase : Dict = [[], []]
self.check_batch_sampler_shards(__a, __a, split_batches=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = BatchSampler(range(24), batch_size=3, drop_last=__a)
_lowerCAmelCase : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
_lowerCAmelCase : Any = BatchSampler(range(24), batch_size=3, drop_last=__a)
# Expected shouldn't change
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_lowerCAmelCase : List[Any] = BatchSampler(range(21), batch_size=3, drop_last=__a)
_lowerCAmelCase : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
_lowerCAmelCase : List[str] = BatchSampler(range(21), batch_size=3, drop_last=__a)
_lowerCAmelCase : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_lowerCAmelCase : Tuple = BatchSampler(range(22), batch_size=3, drop_last=__a)
_lowerCAmelCase : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
_lowerCAmelCase : List[str] = BatchSampler(range(22), batch_size=3, drop_last=__a)
_lowerCAmelCase : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(20), batch_size=3, drop_last=__a)
_lowerCAmelCase : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
_lowerCAmelCase : Dict = BatchSampler(range(20), batch_size=3, drop_last=__a)
_lowerCAmelCase : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
# Check the shards when the dataset is very small.
_lowerCAmelCase : int = BatchSampler(range(2), batch_size=3, drop_last=__a)
_lowerCAmelCase : List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
_lowerCAmelCase : Tuple = BatchSampler(range(2), batch_size=3, drop_last=__a)
_lowerCAmelCase : Tuple = [[], []]
self.check_batch_sampler_shards(__a, __a, even_batches=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = BatchSampler(range(24), batch_size=4, drop_last=__a)
_lowerCAmelCase : Tuple = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
_lowerCAmelCase : Union[str, Any] = BatchSampler(range(24), batch_size=4, drop_last=__a)
# Expected shouldn't change
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
# Check the shards when the dataset is not a round multiple of batch size.
_lowerCAmelCase : List[Any] = BatchSampler(range(22), batch_size=4, drop_last=__a)
_lowerCAmelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
_lowerCAmelCase : List[Any] = BatchSampler(range(22), batch_size=4, drop_last=__a)
_lowerCAmelCase : Optional[int] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_lowerCAmelCase : Any = BatchSampler(range(21), batch_size=4, drop_last=__a)
_lowerCAmelCase : Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
_lowerCAmelCase : Tuple = BatchSampler(range(21), batch_size=4, drop_last=__a)
_lowerCAmelCase : Union[str, Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
# Check the shards when the dataset is very small.
_lowerCAmelCase : Dict = BatchSampler(range(2), batch_size=4, drop_last=__a)
_lowerCAmelCase : List[str] = [[[0, 1]], []]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
_lowerCAmelCase : Optional[int] = BatchSampler(range(2), batch_size=4, drop_last=__a)
_lowerCAmelCase : Optional[Any] = [[], []]
self.check_batch_sampler_shards(__a, __a, split_batches=__a, even_batches=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_lowerCAmelCase : List[Any] = [BatchSamplerShard(__a, 2, __a, even_batches=__a) for i in range(2)]
self.assertEqual(len(batch_sampler_shards[0]), 3)
self.assertEqual(len(batch_sampler_shards[1]), 2)
self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
def snake_case__ ( self, __a, __a, __a, __a=False, __a=2, __a=False):
'''simple docstring'''
random.seed(__a)
_lowerCAmelCase : Dict = list(__a)
_lowerCAmelCase : str = [
IterableDatasetShard(
__a, batch_size=__a, drop_last=__a, num_processes=__a, process_index=__a, split_batches=__a, )
for i in range(__a)
]
_lowerCAmelCase : Dict = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(__a)
iterable_dataset_lists.append(list(__a))
_lowerCAmelCase : str = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_lowerCAmelCase : Any = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(__a), len(__a))
self.assertTrue(len(__a) % shard_batch_size == 0)
_lowerCAmelCase : Optional[int] = []
for idx in range(0, len(__a), __a):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(__a) < len(__a):
reference += reference
self.assertListEqual(__a, reference[: len(__a)])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = 42
_lowerCAmelCase : Dict = RandomIterableDataset()
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
# Edge case with a very small dataset
_lowerCAmelCase : List[str] = RandomIterableDataset(max_length=2)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
self.check_iterable_dataset_shards(__a, __a, batch_size=4, drop_last=__a, split_batches=__a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = BatchSampler(range(16), batch_size=4, drop_last=__a)
_lowerCAmelCase : Dict = SkipBatchSampler(__a, 2)
self.assertListEqual(list(__a), [[8, 9, 10, 11], [12, 13, 14, 15]])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = DataLoader(list(range(16)), batch_size=4)
_lowerCAmelCase : Dict = skip_first_batches(__a, num_batches=2)
self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = DataLoaderShard(list(range(16)), batch_size=4)
for idx, _ in enumerate(__a):
self.assertEqual(dataloader.end_of_dataloader, idx == 3)
# Test it also works on the second iteration
for idx, _ in enumerate(__a):
self.assertEqual(dataloader.end_of_dataloader, idx == 3)
def snake_case__ ( self):
'''simple docstring'''
Accelerator()
_lowerCAmelCase : Optional[int] = DataLoaderDispatcher(range(16), batch_size=4)
for idx, _ in enumerate(__a):
self.assertEqual(dataloader.end_of_dataloader, idx == 3)
# Test it also works on the second iteration
for idx, _ in enumerate(__a):
self.assertEqual(dataloader.end_of_dataloader, idx == 3)
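# ---------------------------------------------------------------------------
# Minimal usage sketch (my addition, not part of the original test file). It
# shows the mid-epoch resumption pattern the tests above exercise: skip the
# batches that were already consumed before a checkpoint was taken. The
# dataset and batch size are arbitrary illustrative values.
if __name__ == "__main__":
    _base_dataloader = DataLoader(list(range(16)), batch_size=4)
    _resumed_dataloader = skip_first_batches(_base_dataloader, num_batches=2)
    for _batch in _resumed_dataloader:
        print(_batch.tolist())  # [8, 9, 10, 11] then [12, 13, 14, 15]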
| 658 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
class UperNetConfig( PretrainedConfig):
    model_type = 'upernet'
    def __init__( self, backbone_config=None, hidden_size=512, initializer_range=0.02, pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_in_channels=384, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, loss_ignore_index=255, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index
    def to_dict( self):
        '''simple docstring'''
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
return output
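# Usage sketch (added for illustration; assumes this module mirrors the
# upstream `transformers` implementation, so the public import below refers to
# the same class; this file itself uses relative imports and cannot be run
# directly):
#
#     from transformers import UperNetConfig
#     config = UperNetConfig()                # falls back to a ResNet backbone
#     print(config.to_dict()["hidden_size"])  # 512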
| 658 | 1 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class FuncContiguousArgs:
    def forward( self, input_ids, token_type_ids, attention_mask):
        '''simple docstring'''
        return None
class FuncNonContiguousArgs:
    def forward( self, input_ids, some_other_args, token_type_ids, attention_mask):
        '''simple docstring'''
        return None
class OnnxExportTestCase( unittest.TestCase):
    MODEL_TO_TEST = [
# (model_name, model_kwargs)
('bert-base-cased', {}),
('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def snake_case__ ( self):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a, "tf", 12, **__a)
@require_torch
@slow
def snake_case__ ( self):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(__a, "pt", 12, **__a)
@require_torch
@slow
def snake_case__ ( self):
'''simple docstring'''
from transformers import BertModel
_lowerCAmelCase : List[str] = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
with NamedTemporaryFile(mode="w+t") as vocab_file:
vocab_file.write("\n".join(__a))
vocab_file.flush()
_lowerCAmelCase : Optional[Any] = BertTokenizerFast(vocab_file.name)
with TemporaryDirectory() as bert_save_dir:
_lowerCAmelCase : Any = BertModel(BertConfig(vocab_size=len(__a)))
model.save_pretrained(__a)
self._test_export(__a, "pt", 12, __a)
@require_tf
@slow
def snake_case__ ( self):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))
# Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model")
@require_torch
@slow
def snake_case__ ( self):
'''simple docstring'''
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)
# Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
self.fail("Quantized model is bigger than initial ONNX model")
    def _test_export( self, model, framework, opset, tokenizer=None, **model_kwargs):
        '''simple docstring'''
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")
                # Remove folder if exists
                if path.parent.exists():
                    path.parent.rmdir()
                # Export
                convert(framework, model, path, opset, tokenizer, **model_kwargs)
                return path
        except Exception as e:
            self.fail(e)
@require_torch
@require_tokenizers
@slow
def snake_case__ ( self):
'''simple docstring'''
from transformers import BertModel
        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")
@require_tf
@require_tokenizers
@slow
def snake_case__ ( self):
'''simple docstring'''
from transformers import TFBertModel
        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")
    def _test_infer_dynamic_axis( self, model, tokenizer, framework):
        '''simple docstring'''
        nlp = FeatureExtractionPipeline(model, tokenizer)
        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars , output_vars , shapes , tokens = infer_shapes(nlp, framework)
        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
self.assertDictEqual(shapes["output_1"], {0: "batch"})
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = ["input_ids", "attention_mask", "token_type_ids"]
_lowerCAmelCase : Optional[Any] = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = ensure_valid_input(FuncContiguousArgs(), __a, __a)
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(__a), 3)
# Should have exactly the same input names
self.assertEqual(set(__a), set(__a))
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(__a, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
_lowerCAmelCase , _lowerCAmelCase : List[Any] = ensure_valid_input(FuncNonContiguousArgs(), __a, __a)
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(__a), 1)
self.assertEqual(len(__a), 1)
# Should have only "input_ids"
self.assertEqual(inputs_args[0], tokens["input_ids"])
self.assertEqual(ordered_input_names[0], "input_ids")
def snake_case__ ( self):
'''simple docstring'''
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
| 658 |
import base64
def base85_encode( string ):
    '''simple docstring'''
    return base64.a85encode(string.encode("utf-8" ) )
def base85_decode( a85encoded ):
    '''simple docstring'''
    return base64.a85decode(a85encoded ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
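# Example round trip (added for illustration), using the helpers above:
if __name__ == "__main__":
    encoded = base85_encode("Hello World!")
    print(encoded)                 # Ascii85-encoded bytes
    print(base85_decode(encoded))  # Hello World!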
| 658 | 1 |
import argparse
import os
import re
import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints
from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param( t5x_checkpoint_path ):
    '''simple docstring'''
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    flax_params = flatten_dict(flax_params )
    return flax_params
def rename_and_convert_flax_params( flax_dict ):
    '''simple docstring'''
    converted_dict = {}
    CONVERSION_MAPPING = {
"token_embedder": "embeddings",
"encoder_norm": "layernorm",
"kernel": "weight",
".out": ".output",
"scale": "weight",
"embedders_0.pos_embedding": "row_embedder.weight",
"embedders_1.pos_embedding": "column_embedder.weight",
}
    DECODER_CONVERSION_MAPPING = {
"query": "attention.query",
"key": "attention.key",
"value": "attention.value",
"output.dense": "output",
"encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
"pre_self_attention_layer_norm": "self_attention.layer_norm",
"pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
"mlp.": "mlp.DenseReluDense.",
"pre_mlp_layer_norm": "mlp.layer_norm",
"self_attention.o": "self_attention.attention.o",
"decoder.embeddings.embedding": "decoder.embed_tokens.weight",
"decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
"decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
"decoder.logits_dense.weight": "decoder.lm_head.weight",
}
for key in flax_dict.keys():
if "target" in key:
# remove the first prefix from the key
_lowerCAmelCase : Optional[int] = ".".join(key[1:] )
# rename the key
for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old , new )
if "decoder" in new_key:
for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old , new )
if "layers" in new_key and "decoder" not in new_key:
# use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)" , r"layer.\1" , new_key )
                new_key = new_key.replace("encoder" , "encoder.encoder" )
elif "layers" in new_key and "decoder" in new_key:
# use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)" , r"layer.\1" , new_key )
            converted_dict[new_key] = flax_dict[key]
    converted_torch_dict = {}
# convert converted_dict into torch format
for key in converted_dict.keys():
if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T )
else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key] )
return converted_torch_dict
def convert_pix2struct_original_pytorch_checkpoint_to_hf( t5x_checkpoint_path , pytorch_dump_folder_path , use_large=False , is_vqa=False ):
    '''simple docstring'''
    flax_params = get_flax_param(t5x_checkpoint_path )
    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1_536 , d_ff=3_968 , num_attention_heads=24 , num_hidden_layers=18 )
        decoder_config = Pix2StructTextConfig(hidden_size=1_536 , d_ff=3_968 , num_heads=24 , num_layers=18 )
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=is_vqa )
    model = Pix2StructForConditionalGeneration(config )
    torch_params = rename_and_convert_flax_params(flax_params )
    model.load_state_dict(torch_params )
    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer" )
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor , tokenizer=tokenizer )
    if use_large:
        processor.image_processor.max_patches = 4_096
        processor.image_processor.is_vqa = True
    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path , exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    processor.save_pretrained(pytorch_dump_folder_path )
    print("Model saved in {}".format(pytorch_dump_folder_path ) )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--use_large", action="store_true", help="Use large model.")
parser.add_argument("--is_vqa", action="store_true", help="Use large model.")
    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
)
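# Example invocation (added for illustration; the script filename and paths
# are placeholders):
#
#     python convert_pix2struct_original_pytorch_to_hf.py \
#         --t5x_checkpoint_path /path/to/t5x/checkpoint \
#         --pytorch_dump_folder_path /path/to/output \
#         --use_large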
| 658 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class Data2VecVisionConfig( PretrainedConfig):
    model_type = 'data2vec-vision'
    def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1E-12, image_size=224, patch_size=16, num_channels=3, use_mask_token=False, use_absolute_position_embeddings=False, use_relative_position_bias=False, use_shared_relative_position_bias=False, layer_scale_init_value=0.1, drop_path_rate=0.1, use_mean_pooling=True, out_indices=[3, 5, 7, 11], pool_scales=[1, 2, 3, 6], use_auxiliary_head=True, auxiliary_loss_weight=0.4, auxiliary_channels=256, auxiliary_num_convs=1, auxiliary_concat_input=False, semantic_loss_ignore_index=255, **kwargs, ):
        '''simple docstring'''
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig( OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
    @property
    def inputs( self):
        '''simple docstring'''
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ])
    @property
    def atol_for_validation( self):
        '''simple docstring'''
        return 1E-4
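# Usage sketch (added for illustration; assumes this module mirrors the
# upstream `transformers` implementation; it uses relative imports, so import
# through the installed package rather than running this file directly):
#
#     from transformers import Data2VecVisionConfig
#     from transformers.models.data2vec.configuration_data2vec_vision import Data2VecVisionOnnxConfig
#     cfg = Data2VecVisionConfig()
#     onnx_cfg = Data2VecVisionOnnxConfig(cfg)
#     print(onnx_cfg.inputs)               # dynamic axes for pixel_values
#     print(onnx_cfg.atol_for_validation)  # 1e-4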
| 658 | 1 |
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class UpperCAmelCase_ ( pl.LightningModule):
    def __init__( self, model):
        '''simple docstring'''
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)
    def forward( self):
'''simple docstring'''
pass
def convert_longformer_qa_checkpoint_to_pytorch( longformer_model , longformer_question_answering_ckpt_path , pytorch_dump_folder_path ):
    '''simple docstring'''
    longformer = LongformerModel.from_pretrained(longformer_model )
    lightning_model = LightningModel(longformer )
    ckpt = torch.load(longformer_question_answering_ckpt_path , map_location=torch.device("cpu" ) )
    lightning_model.load_state_dict(ckpt["state_dict"] )
    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model )
    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
    longformer_for_qa.eval()
    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path )
    print(F"Conversion successful. Model saved under {pytorch_dump_folder_path}" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
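# Example invocation (added for illustration; the script filename, paths, and
# checkpoint name are placeholders):
#
#     python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#         --longformer_model longformer-base-4096 \
#         --longformer_question_answering_ckpt_path /path/to/checkpoint.ckpt \
#         --pytorch_dump_folder_path /path/to/output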
| 658 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
GLUE_TASKS_NUM_LABELS = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def convert_xlnet_checkpoint_to_pytorch( tf_checkpoint_path , xlnet_config_file , pytorch_dump_folder_path , finetuning_task=None ):
    '''simple docstring'''
    config = XLNetConfig.from_json_file(xlnet_config_file )
    finetuning_task = finetuning_task.lower() if finetuning_task is not None else ""
    if finetuning_task in GLUE_TASKS_NUM_LABELS:
        print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
        config.finetuning_task = finetuning_task
        config.num_labels = GLUE_TASKS_NUM_LABELS[finetuning_task]
        model = XLNetForSequenceClassification(config )
    elif "squad" in finetuning_task:
        config.finetuning_task = finetuning_task
        model = XLNetForQuestionAnswering(config )
    else:
        model = XLNetLMHeadModel(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_xlnet(model , config , tf_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
    pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
    print(F"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {os.path.abspath(pytorch_config_dump_path )}" )
    with open(pytorch_config_dump_path , "w" , encoding="utf-8" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
    args = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
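# Example invocation (added for illustration; the script filename and paths
# are placeholders, and the finetuning task is optional):
#
#     python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/xlnet/model.ckpt \
#         --xlnet_config_file /path/to/xlnet_config.json \
#         --pytorch_dump_folder_path /path/to/output \
#         --finetuning_task sts-b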
| 658 | 1 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig( PretrainedConfig):
    model_type = 'gptj'
    attribute_map = {
        'max_position_embeddings': 'n_positions',
        'hidden_size': 'n_embd',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self, vocab_size=5_0400, n_positions=2048, n_embd=4096, n_layer=28, n_head=16, rotary_dim=64, n_inner=None, activation_function="gelu_new", resid_pdrop=0.0, embd_pdrop=0.0, attn_pdrop=0.0, layer_norm_epsilon=1E-5, initializer_range=0.02, use_cache=True, bos_token_id=5_0256, eos_token_id=5_0256, tie_word_embeddings=False, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs)
class GPTJOnnxConfig( OnnxConfigWithPast):
    def __init__( self, config, task = "default", patching_specs = None, use_past = False, ):
        '''simple docstring'''
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs( self):
        '''simple docstring'''
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs
    @property
    def num_layers( self):
        '''simple docstring'''
        return self._config.n_layer
    @property
    def num_attention_heads( self):
        '''simple docstring'''
        return self._config.n_head
    def generate_dummy_inputs( self, tokenizer, batch_size = -1, seq_length = -1, is_pair = False, framework = None, ):
        '''simple docstring'''
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework)
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
                batch , seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1)
        return ordered_inputs
    @property
    def default_onnx_opset( self):
        '''simple docstring'''
        return 13
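# Usage sketch (added for illustration; assumes this module mirrors the
# upstream `transformers` implementation; it uses relative imports, so import
# through the installed package rather than running this file directly):
#
#     from transformers import GPTJConfig
#     from transformers.models.gptj.configuration_gptj import GPTJOnnxConfig
#     cfg = GPTJConfig(n_layer=2, n_head=4, n_embd=64)  # tiny illustrative dims
#     onnx_cfg = GPTJOnnxConfig(cfg, use_past=True)
#     print(onnx_cfg.inputs)  # input_ids, past_key_values.*, attention_mask
#     print(onnx_cfg.num_layers, onnx_cfg.num_attention_heads)  # 2 4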
| 658 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class Bleu( datasets.Metric):
    def _info( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
    def _compute( self, predictions, references, max_order=4, smooth=False):
        '''simple docstring'''
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth)
        (bleu , precisions , bp , ratio , translation_length , reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
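# Usage sketch (added for illustration; this module uses a relative import, so
# load it through the `datasets` library rather than running it directly):
#
#     import datasets
#     bleu = datasets.load_metric("bleu")
#     results = bleu.compute(
#         predictions=[["hello", "there", "general", "kenobi"]],
#         references=[[["hello", "there", "general", "kenobi"]]],
#     )
#     print(results["bleu"])  # 1.0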
| 658 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class TranslationTool( PipelineTool):
    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']
    def encode( self, text, src_lang, tgt_lang):
        '''simple docstring'''
        if src_lang not in self.lang_to_code:
            raise ValueError(f"{src_lang} is not a supported language.")
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f"{tgt_lang} is not a supported language.")
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors="pt", src_lang=src_lang, tgt_lang=tgt_lang)
    def forward( self, inputs):
        '''simple docstring'''
        return self.model.generate(**inputs)
    def decode( self, outputs):
        '''simple docstring'''
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
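# Standalone sketch (added for illustration; not part of the original tool).
# It assumes the tool above wraps the usual NLLB translation pattern; the
# checkpoint and language codes come from the table above, and running it
# downloads the model from the Hub.
#
#     from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-200-distilled-600M", src_lang="eng_Latn")
#     model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-200-distilled-600M")
#     inputs = tokenizer("Hello, how are you?", return_tensors="pt")
#     generated = model.generate(**inputs, forced_bos_token_id=tokenizer.convert_tokens_to_ids("fra_Latn"))
#     print(tokenizer.decode(generated[0], skip_special_tokens=True))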
| 658 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config( config_path , display=False ):
    '''simple docstring'''
    config = OmegaConf.load(config_path )
    if display:
        print(yaml.dump(OmegaConf.to_container(config ) ) )
    return config
def load_vqgan( device , conf_path=None , ckpt_path=None ):
    '''simple docstring'''
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path , display=False )
    model = VQModel(**config.model.params )
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path , map_location=device )
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd , strict=True )
    model.to(device )
    del sd
    return model
def reconstruct_with_vqgan( x , model ):
    '''simple docstring'''
    z , _ , _ = model.encode(x )
    print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
    xrec = model.decode(z )
    return xrec
def get_obj_from_str( string , reload=False ):
    '''simple docstring'''
    module , cls = string.rsplit("." , 1 )
    if reload:
        module_imp = importlib.import_module(module )
        importlib.reload(module_imp )
    return getattr(importlib.import_module(module , package=None ) , cls )
def instantiate_from_config( config ):
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def load_model_from_config( config , sd , gpu=True , eval_mode=True ):
    '''simple docstring'''
    model = instantiate_from_config(config )
    if sd is not None:
        model.load_state_dict(sd )
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model( config , ckpt , gpu , eval_mode ):
    '''simple docstring'''
    if ckpt:
        pl_sd = torch.load(ckpt , map_location="cpu" )
        global_step = pl_sd["global_step"]
        print(F"loaded model from global step {global_step}." )
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=gpu , eval_mode=eval_mode )["model"]
    return model, global_step
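# Usage sketch (added for illustration; the checkpoint paths are placeholders
# and the input tensor is a stand-in for a preprocessed image):
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    vqgan = load_vqgan(device, conf_path="./model_checkpoints/vqgan_only.yaml", ckpt_path="./model_checkpoints/vqgan_only.pt")
    dummy_image = torch.randn(1, 3, 256, 256, device=device)
    reconstruction = reconstruct_with_vqgan(dummy_image, vqgan)
    print(reconstruction.shape)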
| 658 | 1 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
_snake_case = {"facebook/blenderbot_small-90M": 512}
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
class BlenderbotSmallTokenizer( PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self, __a, __a, __a="__start__", __a="__end__", __a="__unk__", __a="__null__", **__a, ):
'''simple docstring'''
super().__init__(unk_token=__a, bos_token=__a, eos_token=__a, pad_token=__a, **__a)
with open(__a, encoding="utf-8") as vocab_handle:
_lowerCAmelCase : int = json.load(__a)
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()}
with open(__a, encoding="utf-8") as merges_handle:
_lowerCAmelCase : int = merges_handle.read().split("\n")[1:-1]
_lowerCAmelCase : Union[str, Any] = [tuple(merge.split()) for merge in merges]
_lowerCAmelCase : Any = dict(zip(__a, range(len(__a))))
_lowerCAmelCase : Any = {}
@property
    def vocab_size( self):
'''simple docstring'''
return len(self.encoder)
    def get_vocab( self):
'''simple docstring'''
return dict(self.encoder, **self.added_tokens_encoder)
    def bpe( self, token):
        '''simple docstring'''
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", R" \1", token)
        token = re.sub("(')", R" \1 ", token)
        token = re.sub(R"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first , second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text):
        '''simple docstring'''
        split_tokens = []
        words = re.findall(R"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        '''simple docstring'''
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        '''simple docstring'''
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        '''simple docstring'''
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix = None):
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
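

# Hedged usage sketch (the file names below are placeholders for a previously
# saved vocabulary in the format written by `save_vocabulary` above):
#
#   tokenizer = BlenderbotSmallTokenizer("vocab.json", "merges.txt")
#   ids = tokenizer("sample text")["input_ids"]
#   print(tokenizer.decode(ids))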
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'roc_bert'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=0, __a="absolute", __a=None, __a=True, __a=True, __a=768, __a=910, __a=512, __a=2_4858, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=__a, **__a)
| 658 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_snake_case = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["SpeechEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["FlaxSpeechEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _snake_case, module_spec=__spec__)
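# With the lazy structure above, `from ...speech_encoder_decoder import
# SpeechEncoderDecoderModel` resolves the heavy torch/flax submodules only on
# first attribute access, so importing the package stays cheap when those
# backends are not installed.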
| 658 |
from __future__ import annotations
def A ( nums ):
    '''simple docstring'''
    if not nums:
        raise ValueError("List is empty" )
    return sum(nums ) / len(nums )
if __name__ == "__main__":
import doctest
doctest.testmod()
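    # Illustrative calls (values chosen for readability):
    print(A([10, 20, 30]))  # -> 20.0
    # A([]) raises ValueError("List is empty")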
| 658 | 1 |
import math
def is_prime( number ):
    '''simple docstring'''
    assert isinstance(number , int ) and (
        number >= 0
    ), "'number' must been an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3 , int(math.sqrt(number ) + 1 ) , 2 )
    return not any(not number % i for i in odd_numbers )


def next_prime( value , factor=1 , **kwargs ):
    '''simple docstring'''
    value = factor * value
    first_value_val = value

    while not is_prime(value ):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1 , **kwargs )
    return value
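

if __name__ == "__main__":
    # Illustrative checks (small values picked by hand):
    print(is_prime(29))  # True
    print(next_prime(14))  # 17, the next prime reached counting up from 14
    print(next_prime(14, desc=True))  # 13, counting down instead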
| 658 |
def hexagonal_numbers( length ):
    '''simple docstring'''
    if length <= 0 or not isinstance(length , int ):
        raise ValueError("Length must be a positive integer." )
    return [n * (2 * n - 1) for n in range(length )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
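    # Expected output: [0, 1, 6, 15, 28] and [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]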
| 658 | 1 |
from __future__ import annotations
def peak( lst ):
    '''simple docstring'''
    m = len(lst ) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
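    # Illustrative call on a list that strictly rises and then falls:
    print(peak([1, 3, 5, 9, 7, 4, 2]))  # -> 9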
| 658 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array):
    '''simple docstring'''
    return input_array.reshape((input_array.size, 1) )


def covariance_within_classes(features, labels, classes):
    '''simple docstring'''
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        data_mean = data.mean(1 )
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data , centered_data.T )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data , centered_data.T )

    return covariance_sum / features.shape[1]


def covariance_between_classes(features, labels, classes):
    '''simple docstring'''
    general_data_mean = features.mean(1 )
    covariance_sum = np.nan
    for i in range(classes ):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1 )
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean ) - column_reshape(general_data_mean ) , (column_reshape(data_mean ) - column_reshape(general_data_mean )).T , )

    return covariance_sum / features.shape[1]


def principal_component_analysis(features, dimensions):
    '''simple docstring'''
    if features.any():
        data_mean = features.mean(1 )
        # Center the dataset
        centered_data = features - np.reshape(data_mean , (data_mean.size, 1) )
        covariance_matrix = np.dot(centered_data , centered_data.T ) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix )
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T , features )
        logging.info("Principal Component Analysis computed" )

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError


def linear_discriminant_analysis(features, labels, classes, dimensions):
    '''simple docstring'''
    assert classes > dimensions

    # Check if features have been already loaded
    if features.any:
        _, eigenvectors = eigh(
            covariance_between_classes(features , labels , classes ) , covariance_within_classes(features , labels , classes ) , )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors )
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T , features )
        logging.info("Linear Discriminant Analysis computed" )

        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=True )
        logging.error("Dataset empty" )
        raise AssertionError


def test_linear_discriminant_analysis():
    '''simple docstring'''
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
    labels = np.array([0, 0, 0, 1, 1] )
    classes = 2
    dimensions = 2

    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError ) as error_info:
        projected_data = linear_discriminant_analysis(
            features , labels , classes , dimensions )
        if isinstance(projected_data , np.ndarray ):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes" )
        assert error_info.type is AssertionError


def test_principal_component_analysis():
    '''simple docstring'''
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] )

    with pytest.raises(AssertionError ) as error_info:
        output = principal_component_analysis(features , dimensions )
        if not np.allclose(expected_output , output ):
            raise AssertionError
        assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
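    # Small end-to-end sketch (random data; the point is the projected shape):
    demo_features = np.random.rand(4, 10)  # 4 features, 10 samples
    print(principal_component_analysis(demo_features, 2).shape)  # (2, 10)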
| 658 | 1 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
_snake_case = "pytorch_model.bin"
@dataclasses.dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'})
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'})
lowerCamelCase__ = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'})
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'A csv or a json file containing the validation data.'})
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'The name of the task to train on.'} , )
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'The list of labels for the task.'})
@dataclasses.dataclass
class UpperCAmelCase_ :
lowerCamelCase__ = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'})
lowerCamelCase__ = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'})
lowerCamelCase__ = dataclasses.field(
default='no' , metadata={
'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
} , )
lowerCamelCase__ = dataclasses.field(
default=10 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
lowerCamelCase__ = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
lowerCamelCase__ = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
lowerCamelCase__ = dataclasses.field(
default=100 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
lowerCamelCase__ = dataclasses.field(
default=a , metadata={'help': 'Random seed for initialization.'} , )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, idalabel, next_data_dir):
    '''simple docstring'''
    dataset = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold )

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset ) )
        print(num_selected_rows )
        dataset = dataset.sort("probability" , reverse=True )
        dataset = dataset.select(range(num_selected_rows ) )

    dataset = dataset.remove_columns(["label", "probability"] )
    dataset = dataset.rename_column("prediction" , "label" )
    dataset = dataset.map(lambda example: {"label": idalabel[example["label"]]} )
    dataset = dataset.shuffle(seed=args.seed )

    pseudo_labeled_data_file = os.path.join(next_data_dir , F"train_pseudo.{args.data_file_extension}" )
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file , index=False )
    else:
        dataset.to_json(pseudo_labeled_data_file )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : int = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
_lowerCAmelCase : Optional[Any] = STModelArguments(model_name_or_path=_lowerCamelCase )
_lowerCAmelCase : Any = STDataArguments(train_file=_lowerCamelCase , infer_file=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = STTrainingArguments(output_dir=_lowerCamelCase )
_lowerCAmelCase : List[Any] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_lowerCamelCase ).items():
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
for key, value in kwargs.items():
if hasattr(_lowerCamelCase , _lowerCamelCase ):
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Sanity checks
_lowerCAmelCase : Dict = {}
_lowerCAmelCase : int = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
_lowerCAmelCase : str = args.train_file
_lowerCAmelCase : List[str] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
_lowerCAmelCase : Optional[int] = args.eval_file
for key in data_files:
_lowerCAmelCase : List[Any] = data_files[key].split("." )[-1]
assert extension in ["csv", "json"], F"`{key}_file` should be a csv or a json file."
if args.data_file_extension is None:
_lowerCAmelCase : List[str] = extension
else:
assert extension == args.data_file_extension, F"`{key}_file` should be a {args.data_file_extension} file`."
assert (
args.eval_metric in datasets.list_metrics()
), F"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("Creating the initial data directory for self-training..." )
_lowerCAmelCase : Union[str, Any] = F"{args.output_dir}/self-train_iter-{{}}".format
_lowerCAmelCase : Dict = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_lowerCamelCase )
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
accelerator.wait_for_everyone()
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : List[str] = None
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Any = False
# Show the progress bar
_lowerCAmelCase : str = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
_lowerCAmelCase : Union[str, Any] = data_dir_format(_lowerCamelCase )
assert os.path.exists(_lowerCamelCase )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , "stage-1" )
_lowerCAmelCase : List[Any] = {
"accelerator": accelerator,
"model_name_or_path": args.model_name_or_path,
"cache_dir": args.cache_dir,
"do_train": True,
"train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
"do_eval": True if args.eval_file is not None else False,
"eval_file": data_files["eval"],
"do_predict": True,
"infer_file": data_files["infer"],
"task_name": args.task_name,
"label_list": args.label_list,
"output_dir": current_output_dir,
"eval_metric": args.eval_metric,
"evaluation_strategy": args.evaluation_strategy,
"early_stopping_patience": args.early_stopping_patience,
"early_stopping_threshold": args.early_stopping_threshold,
"seed": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_lowerCamelCase , _lowerCamelCase ):
arguments_dict.update({key: value} )
_lowerCAmelCase : Optional[Any] = os.path.join(_lowerCamelCase , "best-checkpoint" , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." , _lowerCamelCase , _lowerCamelCase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 1 *****" , _lowerCamelCase )
finetune(**_lowerCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(_lowerCamelCase )
logger.info("Self-training job completed: iteration: %d, stage: 1." , _lowerCamelCase )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
_lowerCAmelCase : Any = os.path.join(_lowerCamelCase , "best-checkpoint" )
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , "stage-2" )
# Update arguments_dict
_lowerCAmelCase : str = model_path
_lowerCAmelCase : Optional[Any] = data_files["train"]
_lowerCAmelCase : Union[str, Any] = current_output_dir
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , "best-checkpoint" , _lowerCamelCase )
if os.path.exists(_lowerCamelCase ):
logger.info(
"Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." , _lowerCamelCase , _lowerCamelCase , )
else:
logger.info("***** Running self-training: iteration: %d, stage: 2 *****" , _lowerCamelCase )
finetune(**_lowerCamelCase )
accelerator.wait_for_everyone()
assert os.path.exists(_lowerCamelCase )
logger.info("Self-training job completed: iteration: %d, stage: 2." , _lowerCamelCase )
_lowerCAmelCase : int = iteration
_lowerCAmelCase : Optional[Any] = data_dir_format(iteration + 1 )
_lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(os.path.join(_lowerCamelCase , "best-checkpoint" ) )
_lowerCAmelCase : Any = config.idalabel
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , "eval_results_best-checkpoint.json" )
_lowerCAmelCase : Any = os.path.join(_lowerCamelCase , "test_results_best-checkpoint.json" )
assert os.path.exists(_lowerCamelCase )
with open(_lowerCamelCase , "r" ) as f:
_lowerCAmelCase : Dict = float(json.load(_lowerCamelCase )[args.eval_metric] )
_lowerCAmelCase : int = os.path.join(_lowerCamelCase , "infer_output_best-checkpoint.csv" )
assert os.path.exists(_lowerCamelCase )
# Loading the dataset from local csv or json files.
_lowerCAmelCase : Union[str, Any] = load_dataset(args.data_file_extension , data_files={"data": data_files["infer"]} )["data"]
_lowerCAmelCase : Dict = load_dataset("csv" , data_files={"data": infer_output_file} )["data"]
if accelerator.is_main_process:
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
shutil.copy(_lowerCamelCase , os.path.join(_lowerCamelCase , F"eval_results_iter-{iteration}.json" ) )
if os.path.exists(_lowerCamelCase ):
shutil.copy(_lowerCamelCase , os.path.join(_lowerCamelCase , F"test_results_iter-{iteration}.json" ) )
create_pseudo_labeled_data(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
accelerator.wait_for_everyone()
_lowerCAmelCase : List[Any] = os.path.join(_lowerCamelCase , F"train_pseudo.{args.data_file_extension}" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
_lowerCAmelCase : Optional[Any] = eval_result
if best_iteration is None:
_lowerCAmelCase : Tuple = new_iteration
_lowerCAmelCase : Union[str, Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
_lowerCAmelCase : Dict = new_iteration
_lowerCAmelCase : str = new_eval_result
_lowerCAmelCase : Union[str, Any] = 0
else:
if new_eval_result == best_eval_result:
_lowerCAmelCase : Any = new_iteration
_lowerCAmelCase : Union[str, Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
_lowerCAmelCase : Union[str, Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("Best iteration: %d" , _lowerCamelCase )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , _lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_lowerCamelCase , F"eval_results_iter-{iteration}.json" ) , os.path.join(_lowerCamelCase , "eval_results_best-iteration.json" ) , )
else:
# Assume that the last iteration is the best
logger.info("Best iteration: %d" , args.max_selftrain_iterations - 1 )
logger.info("Best evaluation result: %s = %f" , args.eval_metric , _lowerCamelCase )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_lowerCamelCase , F"eval_results_iter-{args.max_selftrain_iterations - 1}.json" ) , os.path.join(_lowerCamelCase , "eval_results_best-iteration.json" ) , )
| 658 |
import requests
from bs4 import BeautifulSoup
def get_citation( base_url , params ):
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , "html.parser" )
    div = soup.find("div" , attrs={"class": "gs_ri"} )
    anchors = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
    return anchors[2].get_text()
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
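    # Note: this scrapes Google Scholar's HTML, so the CSS class names used above
    # ("gs_ri", "gs_fl") can change without notice and requests may be rate-limited.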
| 658 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
| 658 |
def solution( n = 1_000_000 ):
    '''simple docstring'''
    largest_number = 1
    pre_counter = 1
    counters = {1: 1}

    for inputa in range(2 , n ):
        counter = 0
        number = inputa

        while True:
            if number in counters:
                counter += counters[number]
                break
            if number % 2 == 0:
                number //= 2
                counter += 1
            else:
                number = (3 * number) + 1
                counter += 1

        if inputa not in counters:
            counters[inputa] = counter

        if counter > pre_counter:
            largest_number = inputa
            pre_counter = counter
    return largest_number
if __name__ == "__main__":
print(solution(int(input().strip())))
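    # Sanity check with a tiny bound: solution(10) -> 9, since the Collatz chain
    # starting at 9 (20 terms) is the longest one below ten.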
| 658 | 1 |
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
_snake_case = ["bert-base-uncased", "bert-base-cased"]
_snake_case = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
class UpperCAmelCase_ ( tf.keras.Model):
def __init__( self, __a):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Dict = tokenizer
_lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(__a)
_lowerCAmelCase : Any = TFAutoModel.from_config(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tokenizer(__a)
_lowerCAmelCase : Tuple = self.bert(**__a)
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
_lowerCAmelCase : Optional[int] = [
BertTokenizer.from_pretrained(__a) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_lowerCAmelCase : Any = [TFBertTokenizer.from_pretrained(__a) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__a, use_fast_bert_tokenizer=__a)
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers) == len(self.tf_tokenizers)
_lowerCAmelCase : Optional[Any] = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
_lowerCAmelCase : Any = list(zip(self.test_sentences, self.test_sentences[::-1]))
def snake_case__ ( self):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCAmelCase : str = tokenizer(__a, return_tensors="tf", padding="longest")
_lowerCAmelCase : str = tf_tokenizer(__a)
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.intaa) == tf_outputs[key]))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : Optional[Any] = tf_tokenizer(self.paired_sentences)
_lowerCAmelCase : Union[str, Any] = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences], text_pair=[sentence[1] for sentence in self.paired_sentences], )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.intaa) == separated_outputs[key]))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : Optional[int] = tf.function(__a)
for test_inputs in (self.test_sentences, self.paired_sentences):
_lowerCAmelCase : Optional[Any] = tf.constant(__a)
_lowerCAmelCase : str = compiled_tokenizer(__a)
_lowerCAmelCase : str = tf_tokenizer(__a)
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
def snake_case__ ( self):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_lowerCAmelCase : str = ModelToSave(tokenizer=__a)
_lowerCAmelCase : Dict = tf.convert_to_tensor(self.test_sentences)
_lowerCAmelCase : str = model(__a) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_lowerCAmelCase : Union[str, Any] = Path(__a) / "saved.model"
model.save(__a)
_lowerCAmelCase : Dict = tf.keras.models.load_model(__a)
_lowerCAmelCase : int = loaded_model(__a)
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1E-5)
| 658 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
_snake_case = "https://openaipublic.azureedge.net/jukebox/models/"
MODEL_MAPPING = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def replace_key( key ):
    '''simple docstring'''
    if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.bias" , ".conv1d_1.bias" )
    elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.1.weight" , ".conv1d_1.weight" )
    elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.bias" , ".conv1d_2.bias" )
    elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 10:
        key = key.replace(".model.3.weight" , ".conv1d_2.weight" )

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0" , "conditioner_blocks" )

    if "prime_prior" in key:
        key = key.replace("prime_prior" , "encoder" )

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb." , "." )

    if key.endswith("k" ):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k" , ".codebook" )
    if "y_emb." in key:
        return key.replace("y_emb." , "metadata_embedding." )
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb" , "embed_tokens" )
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln" , "encoder.final_layer_norm" )
    if ".ln" in key:
        return key.replace(".ln" , ".layer_norm" )
    if "_ln" in key:
        return key.replace("_ln" , "_layer_norm" )
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj" , "encoder.proj_in" )
    if "prime_x_out" in key:
        return key.replace("prime_x_out" , "encoder.lm_head" )
    if "prior.x_out" in key:
        return key.replace("x_out" , "fc_proj_out" )
    if "x_emb" in key:
        return key.replace("x_emb" , "embed_tokens" )
    return key
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = {}
import re
_lowerCAmelCase : Union[str, Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : int = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
_lowerCAmelCase : List[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
_lowerCAmelCase : Optional[int] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Any = re_encoder_block_conv_in.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Tuple = re_encoder_block_conv_in.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = re_encoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : str = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
_lowerCAmelCase : str = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Union[str, Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
_lowerCAmelCase : Optional[Any] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : int = prefix + resnet_block
_lowerCAmelCase : int = re_encoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_encoder_block_proj_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = re_encoder_block_proj_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
_lowerCAmelCase : str = re_encoder_block_proj_out.sub(_lowerCamelCase , _lowerCamelCase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Any = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Optional[int] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : str = re_decoder_block_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_decoder_block_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
_lowerCAmelCase : Union[str, Any] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
_lowerCAmelCase : Optional[int] = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : Dict = prefix + resnet_block
_lowerCAmelCase : Dict = re_decoder_block_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_decoder_block_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_decoder_block_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = regex_match.groups()
_lowerCAmelCase : Optional[Any] = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
_lowerCAmelCase : Any = re_decoder_block_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.match(_lowerCamelCase )
_lowerCAmelCase : List[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : Tuple = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
_lowerCAmelCase : Optional[int] = re_prior_cond_conv_out.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_resnet.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : List[str] = re_prior_cond_resnet.match(_lowerCamelCase )
_lowerCAmelCase : List[str] = regex_match.groups()
_lowerCAmelCase : Union[str, Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
_lowerCAmelCase : List[str] = {"1": 1, "3": 2}[groups[-2]]
_lowerCAmelCase : Optional[Any] = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
_lowerCAmelCase : Tuple = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
_lowerCAmelCase : List[Any] = prefix + resnet_block
_lowerCAmelCase : Optional[Any] = re_prior_cond_resnet.sub(_lowerCamelCase , _lowerCamelCase )
elif re_prior_cond_proj_in.fullmatch(_lowerCamelCase ):
_lowerCAmelCase : int = re_prior_cond_proj_in.match(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = regex_match.groups()
_lowerCAmelCase : Optional[int] = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
_lowerCAmelCase : List[str] = re_prior_cond_proj_in.sub(_lowerCamelCase , _lowerCamelCase )
# keep original key
else:
_lowerCAmelCase : Optional[int] = original_key
_lowerCAmelCase : Tuple = replace_key(_lowerCamelCase )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle missmatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
_lowerCAmelCase : Any = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
_lowerCAmelCase : Tuple = original_key
_lowerCAmelCase : List[Any] = original_key
_lowerCAmelCase : Optional[int] = value
return new_dict
@torch.no_grad()
def A ( _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
_lowerCAmelCase : List[Any] = requests.get(F"{PREFIX}{file}" , allow_redirects=_lowerCamelCase )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=_lowerCamelCase )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , "wb" ).write(r.content )
_lowerCAmelCase : Optional[Any] = MODEL_MAPPING[model_name.split("/" )[-1]]
_lowerCAmelCase : Tuple = JukeboxConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = JukeboxModel(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = []
_lowerCAmelCase : List[Any] = {}
for i, dict_name in enumerate(_lowerCamelCase ):
_lowerCAmelCase : Any = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )["model"]
_lowerCAmelCase : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
_lowerCAmelCase : Dict = old_dic[k]
elif k.endswith(".w" ):
_lowerCAmelCase : Tuple = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
_lowerCAmelCase : str = old_dic[k]
else:
_lowerCAmelCase : Union[str, Any] = old_dic[k]
_lowerCAmelCase : Union[str, Any] = "vqvae" if i == 0 else F"priors.{3 - i}"
_lowerCAmelCase : Union[str, Any] = fix_jukebox_keys(_lowerCamelCase , model.state_dict() , _lowerCamelCase , _lowerCamelCase )
weight_dict.append(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = weight_dict.pop(0 )
model.vqvae.load_state_dict(_lowerCamelCase )
for i in range(len(_lowerCamelCase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
with open(F"{pytorch_dump_folder_path}/mapping.json" , "w" ) as txtfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_lowerCamelCase )
return weight_dict
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
_snake_case = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 658 | 1 |
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = "ylacombe/bark-small"
_lowerCAmelCase : int = tempfile.mkdtemp()
_lowerCAmelCase : Dict = "en_speaker_1"
_lowerCAmelCase : str = "This is a test string"
_lowerCAmelCase : List[Any] = "speaker_embeddings_path.json"
_lowerCAmelCase : Any = "speaker_embeddings"
def snake_case__ ( self, **__a):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.checkpoint, **__a)
def snake_case__ ( self):
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : int = BarkProcessor(tokenizer=__a)
processor.save_pretrained(self.tmpdirname)
_lowerCAmelCase : Tuple = BarkProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
processor.save_pretrained(
self.tmpdirname, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, speaker_embeddings_directory=self.speaker_embeddings_directory, )
_lowerCAmelCase : str = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
_lowerCAmelCase : Optional[int] = BarkProcessor.from_pretrained(
self.tmpdirname, self.speaker_embeddings_dict_path, bos_token="(BOS)", eos_token="(EOS)", )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint, speaker_embeddings_dict_path=self.speaker_embeddings_dict_path, )
_lowerCAmelCase : Union[str, Any] = 35
_lowerCAmelCase : Dict = 2
_lowerCAmelCase : int = 8
_lowerCAmelCase : str = {
"semantic_prompt": np.ones(__a),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
"fine_prompt": np.ones((nb_codebooks_total, seq_len)),
}
# test providing already loaded voice_preset
_lowerCAmelCase : Dict = processor(text=self.input_string, voice_preset=__a)
_lowerCAmelCase : List[str] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(__a, np.array([])).tolist())
# test loading voice preset from npz file
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname, "file.npz")
np.savez(__a, **__a)
_lowerCAmelCase : List[str] = processor(text=self.input_string, voice_preset=__a)
_lowerCAmelCase : Optional[int] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(__a, np.array([])).tolist())
# test loading voice preset from the hub
_lowerCAmelCase : int = processor(text=self.input_string, voice_preset=self.voice_preset)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : List[Any] = BarkProcessor(tokenizer=__a)
_lowerCAmelCase : Dict = processor(text=self.input_string)
_lowerCAmelCase : Union[str, Any] = tokenizer(
self.input_string, padding="max_length", max_length=256, add_special_tokens=__a, return_attention_mask=__a, return_token_type_ids=__a, )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 658 |
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def gabor_filter_kernel( ksize , sigma , theta , lambd , gamma , psi ):
    '''simple docstring'''
    # the kernel size has to be odd so the filter has a center pixel
    if (ksize % 2) == 0:
        ksize = ksize + 1
    gabor = np.zeros((ksize, ksize) , dtype=np.float32 )

    # each value
    for y in range(ksize ):
        for x in range(ksize ):
            # distance from center
            px = x - ksize // 2
            py = y - ksize // 2

            # degree to radiant
            _theta = theta / 180 * np.pi
            cos_theta = np.cos(_theta )
            sin_theta = np.sin(_theta )

            # get kernel x
            _x = cos_theta * px + sin_theta * py

            # get kernel y
            _y = -sin_theta * px + cos_theta * py

            # fill kernel
            gabor[y, x] = np.exp(
                -(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )

    return gabor
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # read original image
    img = imread("../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # Apply multiple Kernel to detect edges
    out = np.zeros(gray.shape[:2])
    for theta in [0, 30, 60, 90, 120, 150]:
        kernel_aa = gabor_filter_kernel(10, 8, theta, 10, 0, 0)
        out += filter2D(gray, CV_8UC3, kernel_aa)
    out = out / out.max() * 255
    out = out.astype(np.uint8)

    imshow("Original", gray)
    imshow("Gabor filter with 20x20 mask and 6 directions", out)
    waitKey(0)
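    # Note: gabor_filter_kernel(10, ...) actually yields an 11x11 kernel, since an
    # even ksize is bumped to the next odd value so the kernel has a center pixel.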
| 658 | 1 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions( library_or_version , operation , requirement_version ):
    '''simple docstring'''
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}" )
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation(library_or_version , parse(requirement_version ) )


def is_torch_version( operation , version ):
    '''simple docstring'''
    return compare_versions(torch_version , operation , version )
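

# Hedged usage sketch (assumes the `torch` package is installed so the module
# import above succeeds):
#
#   if is_torch_version(">=", "1.12.0"):
#       ...  # take a code path that needs a newer torch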
| 658 |
def binary_insertion_sort( collection ):
    '''simple docstring'''
    n = len(collection )
    for i in range(1 , n ):
        val = collection[i]
        low = 0
        high = i - 1

        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        for j in range(i , low , -1 ):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(binary_insertion_sort(unsorted))
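    # e.g. entering "5, 2, 9, 1" prints [1, 2, 5, 9]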
| 658 | 1 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq( number ):
    '''simple docstring'''
    sq = int(number**0.5 )
    return number == sq * sq


def add_three( x_num , x_den , y_num , y_den , z_num , z_den ):
    '''simple docstring'''
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom


def solution( order = 35 ):
    '''simple docstring'''
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0 )
    fraction_sum: tuple[int, int]

    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )

                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )

                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )

                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )

    for num, den in unique_s:
        total += Fraction(num , den )

    return total.denominator + total.numerator
if __name__ == "__main__":
print(f'''{solution() = }''')
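    # Note: the search is quartic in `order` (four nested loops over numerators
    # and denominators), so the default order=35 takes far longer than, say, order=5.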
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_snake_case = logging.get_logger(__name__)
_snake_case = {
"microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}
class UpperCAmelCase_ ( a , a):
lowerCamelCase__ = 'focalnet'
def __init__( self, __a=224, __a=4, __a=3, __a=96, __a=False, __a=[192, 384, 768, 768], __a=[2, 2, 6, 2], __a=[2, 2, 2, 2], __a=[3, 3, 3, 3], __a="gelu", __a=4.0, __a=0.0, __a=0.1, __a=False, __a=1E-4, __a=False, __a=False, __a=False, __a=0.02, __a=1E-5, __a=32, __a=None, __a=None, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : str = image_size
_lowerCAmelCase : List[str] = patch_size
_lowerCAmelCase : List[Any] = num_channels
_lowerCAmelCase : Tuple = embed_dim
_lowerCAmelCase : List[Any] = use_conv_embed
_lowerCAmelCase : Any = hidden_sizes
_lowerCAmelCase : Tuple = depths
_lowerCAmelCase : Dict = focal_levels
_lowerCAmelCase : Optional[Any] = focal_windows
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : Union[str, Any] = mlp_ratio
_lowerCAmelCase : Any = hidden_dropout_prob
_lowerCAmelCase : Dict = drop_path_rate
_lowerCAmelCase : str = use_layerscale
_lowerCAmelCase : str = layerscale_value
_lowerCAmelCase : Union[str, Any] = use_post_layernorm
_lowerCAmelCase : Optional[int] = use_post_layernorm_in_modulation
_lowerCAmelCase : str = normalize_modulator
_lowerCAmelCase : Any = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = encoder_stride
_lowerCAmelCase : List[str] = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_aligned_output_features_output_indices(
out_features=__a, out_indices=__a, stage_names=self.stage_names)
| 658 | 1 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( a , unittest.TestCase):
lowerCamelCase__ = GPTSwaTokenizer
lowerCamelCase__ = False
lowerCamelCase__ = True
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
super().setUp()
# We have a SentencePiece fixture for testing
_lowerCAmelCase : Dict = GPTSwaTokenizer(__a, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
tokenizer.save_pretrained(self.tmpdirname)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : str = "This is a test"
_lowerCAmelCase : List[Any] = "This is a test"
return input_text, output_text
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = "<s>"
_lowerCAmelCase : Tuple = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__a), __a)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__a), __a)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], "<unk>")
self.assertEqual(vocab_keys[1], "<s>")
self.assertEqual(vocab_keys[-1], "j")
self.assertEqual(len(__a), 2000)
def snake_case__ ( self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size, 2000)
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : List[Any] = GPTSwaTokenizer(__a)
_lowerCAmelCase : List[str] = tokenizer.tokenize("This is a test")
self.assertListEqual(__a, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a), [465, 287, 265, 631, 842])
_lowerCAmelCase : Union[str, Any] = tokenizer.tokenize("I was born in 92000, and this is falsé.")
# fmt: off
self.assertListEqual(
__a, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."], )
# fmt: on
_lowerCAmelCase : Union[str, Any] = tokenizer.convert_tokens_to_ids(__a)
self.assertListEqual(
__a, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260], )
_lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(__a)
# fmt: off
self.assertListEqual(
__a, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."])
# fmt: on
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = GPTSwaTokenizer(__a)
_lowerCAmelCase : int = ["This is a test", "I was born in 92000, and this is falsé."]
_lowerCAmelCase : str = [
[465, 287, 265, 631, 842],
[262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
]
# Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
for text, expected_ids in zip(__a, __a):
self.assertListEqual(tokenizer.encode_fast(__a), __a)
# Test that decode_fast returns the input text
for text, token_ids in zip(__a, __a):
self.assertEqual(tokenizer.decode_fast(__a), __a)
@slow
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = [
"<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
"Hey there, how are you doing this fine day?",
"This is a text with a trailing spaces followed by a dot .",
"Häj sväjs lillebrör! =)",
"Det är inget fel på Mr. Cool",
]
# fmt: off
_lowerCAmelCase : str = {"input_ids": [[6_3423, 5, 6811, 1_4954, 282, 816, 3821, 6_3466, 6_3425, 6_3462, 18, 6_3978, 678, 301, 1320, 6_3423, 6_3455, 6_3458, 18, 6_3982, 4246, 3940, 1901, 4_7789, 5547, 1_8994], [1_9630, 1100, 6_3446, 1342, 633, 544, 4488, 593, 5102, 2416, 6_3495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 5_8593, 2_2413, 9106, 546, 268, 3_3213, 6_3979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5130, 6_3450, 924, 6_3449, 2249, 4062, 1558, 318, 6_3504, 2_1498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 6_3443, 2_6801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__a, model_name="AI-Sweden/gpt-sw3-126m", sequences=__a, )
| 658 |
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def count_of_possible_combinations(_lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
return sum(count_of_possible_combinations(target - item ) for item in array )
return count_of_possible_combinations(_lowerCamelCase )
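# Worked example for the recurrence above (illustrative; it matches the demo at the
# bottom of this file): with array = [1, 2, 5] and target = 5,
#     ways(t) = sum(ways(t - item) for item in array), ways(0) = 1, ways(t < 0) = 0
# gives ways(1) = 1, ways(2) = 2, ways(3) = 3, ways(4) = 5 and finally
# ways(5) = ways(4) + ways(3) + ways(0) = 9. Order matters: (1, 2, 2) and (2, 1, 2)
# count as different combinations.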
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
def count_of_possible_combinations_with_dp_array(
_lowerCamelCase , _lowerCamelCase ) -> int:
if target < 0:
return 0
if target == 0:
return 1
if dp_array[target] != -1:
return dp_array[target]
_lowerCAmelCase : Optional[int] = sum(
count_of_possible_combinations_with_dp_array(target - item , _lowerCamelCase )
for item in array )
_lowerCAmelCase : Any = answer
return answer
_lowerCAmelCase : List[Any] = [-1] * (target + 1)
return count_of_possible_combinations_with_dp_array(_lowerCamelCase , _lowerCamelCase )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = [0] * (target + 1)
_lowerCAmelCase : List[str] = 1
for i in range(1 , target + 1 ):
for j in range(_lowerCamelCase ):
if i - array[j] >= 0:
dp_array[i] += dp_array[i - array[j]]
return dp_array[target]
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = 3
_snake_case = 5
_snake_case = [1, 2, 5]
print(combination_sum_iv(n, array, target))
| 658 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = os.path.join(args.tf_model_dir , "parameters.json" )
_lowerCAmelCase : Optional[Any] = json.loads(open(_lowerCamelCase ).read() )
if not params:
raise ValueError(
F"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file." )
if not args.output.endswith(".pt" ):
_lowerCAmelCase : Dict = args.output + ".pt"
_lowerCAmelCase : Any = OrderedDict()
with tf.device("/CPU:0" ):
_lowerCAmelCase : List[Any] = tf.train.load_checkpoint(args.tf_model_dir )
_lowerCAmelCase : Dict = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
_lowerCAmelCase : Tuple = reader.get_tensor(_lowerCamelCase ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
_lowerCAmelCase : Any = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
_lowerCAmelCase : Union[str, Any] = 8
_lowerCAmelCase : Union[str, Any] = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
_lowerCAmelCase : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : Tuple = torch.tensor(_lowerCamelCase )
elif key_name.startswith("model/moe" ):
_lowerCAmelCase : Optional[int] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
_lowerCAmelCase : List[str] = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
_lowerCAmelCase : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : Optional[int] = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/softmlp/kernel" ):
_lowerCAmelCase : int = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
_lowerCAmelCase : List[Any] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : List[Any] = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
_lowerCAmelCase : List[Any] = key_name[-9:-7]
for i in range(16 ):
_lowerCAmelCase : Tuple = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
_lowerCAmelCase : Any = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
_lowerCAmelCase : Any = torch.tensor(_lowerCamelCase )
elif key_name.startswith("model/mlp" ):
_lowerCAmelCase : List[Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
_lowerCAmelCase : Optional[int] = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
_lowerCAmelCase : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : List[Any] = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/p1/bias" ):
_lowerCAmelCase : str = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
_lowerCAmelCase : List[Any] = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : int = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/p2/kernel" ):
_lowerCAmelCase : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
_lowerCAmelCase : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : Any = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/p2/bias" ):
_lowerCAmelCase : Union[str, Any] = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
_lowerCAmelCase : List[str] = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : Dict = torch.tensor(_lowerCamelCase )
elif key_name.startswith("model/ln" ):
_lowerCAmelCase : Tuple = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_lowerCAmelCase : Dict = "model.blocks.%d.feed_forward.norm.bias" % player
_lowerCAmelCase : Dict = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : Union[str, Any] = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/g" ):
_lowerCAmelCase : str = "model.blocks.%d.feed_forward.norm.weight" % player
_lowerCAmelCase : Any = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : str = torch.tensor(_lowerCamelCase )
elif key_name.startswith("model/att" ):
_lowerCAmelCase : Union[str, Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
_lowerCAmelCase : Any = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
_lowerCAmelCase : Tuple = state[:, 0, :, :]
_lowerCAmelCase : int = state[:, 1, :, :]
_lowerCAmelCase : List[str] = state[:, 2, :, :]
_lowerCAmelCase : Dict = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : Any = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : Optional[Any] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : str = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
_lowerCAmelCase : Any = torch.tensor(_lowerCamelCase )
_lowerCAmelCase : Any = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
_lowerCAmelCase : Optional[Any] = torch.tensor(_lowerCamelCase )
_lowerCAmelCase : Tuple = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
_lowerCAmelCase : Any = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/o/kernel" ):
_lowerCAmelCase : str = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
_lowerCAmelCase : Dict = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : int = torch.tensor(_lowerCamelCase )
elif key_name.startswith("model/an" ):
_lowerCAmelCase : Union[str, Any] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
_lowerCAmelCase : Optional[int] = "model.blocks.%d.self_attn.norm.bias" % player
_lowerCAmelCase : Optional[Any] = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : Union[str, Any] = torch.tensor(_lowerCamelCase )
elif key_name.endswith("/g" ):
_lowerCAmelCase : Tuple = "model.blocks.%d.self_attn.norm.weight" % player
_lowerCAmelCase : Dict = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : int = torch.tensor(_lowerCamelCase )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
_lowerCAmelCase : Tuple = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
_lowerCAmelCase : Union[str, Any] = "model.%s.weight" % nlayer
_lowerCAmelCase : Union[str, Any] = vnp.copy() # same in embedded
_lowerCAmelCase : Any = torch.tensor(_lowerCamelCase )
if key_name.startswith("model/wte" ):
_lowerCAmelCase : Any = "lm_head.weight"
_lowerCAmelCase : str = vnp.copy() # same in embedded
_lowerCAmelCase : Optional[int] = torch.tensor(_lowerCamelCase )
elif key_name.startswith("model/wob" ):
_lowerCAmelCase : Optional[int] = "final_logits_bias"
_lowerCAmelCase : Dict = vnp.copy() # same in embedded
_lowerCAmelCase : List[str] = state.reshape((1, -1) )
_lowerCAmelCase : Union[str, Any] = torch.tensor(_lowerCamelCase )
elif key_name == "model/dense/kernel":
_lowerCAmelCase : Union[str, Any] = "model.last_project.weight"
_lowerCAmelCase : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
_lowerCAmelCase : Any = torch.tensor(_lowerCamelCase )
elif key_name == "model/dense_1/bias":
_lowerCAmelCase : Optional[Any] = "model.last_project.bias"
_lowerCAmelCase : Optional[int] = vnp.copy() # same because it is one dimensional
_lowerCAmelCase : Dict = torch.tensor(_lowerCamelCase )
torch.save(_lowerCamelCase , args.output )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser(
description="model converter.", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("--tf_model_dir", metavar="PATH", type=str, required=True, help="import model")
parser.add_argument("--output", metavar="PATH", type=str, required=True, help="output model")
_snake_case = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 658 |
import string
def A ( _lowerCamelCase ):
'''simple docstring'''
for key in range(len(string.ascii_uppercase ) ):
_lowerCAmelCase : str = ""
for symbol in message:
if symbol in string.ascii_uppercase:
_lowerCAmelCase : List[str] = string.ascii_uppercase.find(_lowerCamelCase )
_lowerCAmelCase : Dict = num - key
if num < 0:
_lowerCAmelCase : Dict = num + len(string.ascii_uppercase )
_lowerCAmelCase : Optional[Any] = translated + string.ascii_uppercase[num]
else:
_lowerCAmelCase : int = translated + symbol
print(F"Decryption using Key #{key}: {translated}" )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Tuple = input("Encrypted message: " )
_lowerCAmelCase : Dict = message.upper()
decrypt(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 658 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {"configuration_wavlm": ["WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "WavLMConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"WavLMForAudioFrameClassification",
"WavLMForCTC",
"WavLMForSequenceClassification",
"WavLMForXVector",
"WavLMModel",
"WavLMPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wavlm import WAVLM_PRETRAINED_CONFIG_ARCHIVE_MAP, WavLMConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavlm import (
WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST,
WavLMForAudioFrameClassification,
WavLMForCTC,
WavLMForSequenceClassification,
WavLMForXVector,
WavLMModel,
WavLMPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 658 |
import requests
from bs4 import BeautifulSoup
def A ( _lowerCamelCase = "https://www.worldometers.info/coronavirus" ):
'''simple docstring'''
_lowerCAmelCase : str = BeautifulSoup(requests.get(_lowerCamelCase ).text , "html.parser" )
_lowerCAmelCase : str = soup.findAll("h1" )
_lowerCAmelCase : Optional[int] = soup.findAll("div" , {"class": "maincounter-number"} )
keys += soup.findAll("span" , {"class": "panel-title"} )
values += soup.findAll("div" , {"class": "number-table-main"} )
return {key.text.strip(): value.text.strip() for key, value in zip(_lowerCamelCase , _lowerCamelCase )}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
for key, value in world_covidaa_stats().items():
print(f'''{key}\n{value}\n''')
| 658 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = ['input_features', 'is_longer']
def __init__( self, __a=64, __a=4_8000, __a=480, __a=10, __a=1024, __a=0.0, __a=False, __a = 0, __a = 1_4000, __a = None, __a = "fusion", __a = "repeatpad", **__a, ):
'''simple docstring'''
super().__init__(
feature_size=__a, sampling_rate=__a, padding_value=__a, return_attention_mask=__a, **__a, )
_lowerCAmelCase : List[str] = top_db
_lowerCAmelCase : Dict = truncation
_lowerCAmelCase : Dict = padding
_lowerCAmelCase : List[str] = fft_window_size
_lowerCAmelCase : int = (fft_window_size >> 1) + 1
_lowerCAmelCase : Any = hop_length
_lowerCAmelCase : Any = max_length_s
_lowerCAmelCase : int = max_length_s * sampling_rate
_lowerCAmelCase : Tuple = sampling_rate
_lowerCAmelCase : Dict = frequency_min
_lowerCAmelCase : int = frequency_max
_lowerCAmelCase : int = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=__a, min_frequency=__a, max_frequency=__a, sampling_rate=__a, norm=__a, mel_scale="htk", )
_lowerCAmelCase : Optional[int] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins, num_mel_filters=__a, min_frequency=__a, max_frequency=__a, sampling_rate=__a, norm="slaney", mel_scale="slaney", )
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = copy.deepcopy(self.__dict__)
_lowerCAmelCase : int = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def snake_case__ ( self, __a, __a = None):
'''simple docstring'''
_lowerCAmelCase : str = spectrogram(
__a, window_function(self.fft_window_size, "hann"), frame_length=self.fft_window_size, hop_length=self.hop_length, power=2.0, mel_filters=__a, log_mel="dB", )
return log_mel_spectrogram.T
def snake_case__ ( self, __a, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = np.array_split(list(range(0, total_frames - chunk_frames + 1)), 3)
if len(ranges[1]) == 0:
# if the audio is too short, we just use the first chunk
_lowerCAmelCase : List[Any] = [0]
if len(ranges[2]) == 0:
# if the audio is too short, we just use the first chunk
_lowerCAmelCase : List[Any] = [0]
# randomly choose index for each part
_lowerCAmelCase : str = np.random.choice(ranges[0])
_lowerCAmelCase : List[Any] = np.random.choice(ranges[1])
_lowerCAmelCase : Tuple = np.random.choice(ranges[2])
_lowerCAmelCase : Dict = mel[idx_front : idx_front + chunk_frames, :]
_lowerCAmelCase : Tuple = mel[idx_middle : idx_middle + chunk_frames, :]
_lowerCAmelCase : List[str] = mel[idx_back : idx_back + chunk_frames, :]
_lowerCAmelCase : Dict = torch.tensor(mel[None, None, :])
_lowerCAmelCase : Optional[Any] = torch.nn.functional.interpolate(
__a, size=[chunk_frames, 64], mode="bilinear", align_corners=__a)
_lowerCAmelCase : Tuple = mel_shrink[0][0].numpy()
_lowerCAmelCase : Dict = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back], axis=0)
return mel_fusion
def snake_case__ ( self, __a, __a, __a, __a):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_lowerCAmelCase : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_lowerCAmelCase : List[str] = len(__a) - max_length
_lowerCAmelCase : Union[str, Any] = np.random.randint(0, overflow + 1)
_lowerCAmelCase : str = waveform[idx : idx + max_length]
_lowerCAmelCase : Any = self._np_extract_fbank_features(__a, self.mel_filters_slaney)[None, :]
elif truncation == "fusion":
_lowerCAmelCase : List[Any] = self._np_extract_fbank_features(__a, self.mel_filters)
_lowerCAmelCase : List[Any] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_lowerCAmelCase : Union[str, Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_lowerCAmelCase : int = np.stack([mel, mel, mel, mel], axis=0)
_lowerCAmelCase : int = False
else:
_lowerCAmelCase : Any = self._random_mel_fusion(__a, __a, __a)
_lowerCAmelCase : str = True
else:
raise NotImplementedError(f"data_truncating {truncation} not implemented")
else:
_lowerCAmelCase : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_lowerCAmelCase : Optional[Any] = int(max_length / len(__a))
_lowerCAmelCase : Dict = np.stack(np.tile(__a, n_repeat + 1))[:max_length]
if padding == "repeatpad":
_lowerCAmelCase : Any = int(max_length / len(__a))
_lowerCAmelCase : Optional[Any] = np.stack(np.tile(__a, __a))
_lowerCAmelCase : Optional[int] = np.pad(__a, (0, max_length - waveform.shape[0]), mode="constant", constant_values=0)
if truncation == "fusion":
_lowerCAmelCase : Union[str, Any] = self._np_extract_fbank_features(__a, self.mel_filters)
_lowerCAmelCase : List[str] = np.stack([input_mel, input_mel, input_mel, input_mel], axis=0)
else:
_lowerCAmelCase : List[str] = self._np_extract_fbank_features(__a, self.mel_filters_slaney)[None, :]
return input_mel, longer
def __call__( self, __a, __a = None, __a = None, __a = None, __a = None, __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = truncation if truncation is not None else self.truncation
_lowerCAmelCase : Tuple = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
f" was sampled with {self.sampling_rate} and not {sampling_rate}.")
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug.")
_lowerCAmelCase : List[Any] = isinstance(__a, np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"Only mono-channel audio is supported for input to {self}")
_lowerCAmelCase : str = is_batched_numpy or (
isinstance(__a, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
)
if is_batched:
_lowerCAmelCase : Optional[Any] = [np.asarray(__a, dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(__a, np.ndarray):
_lowerCAmelCase : List[Any] = np.asarray(__a, dtype=np.floataa)
elif isinstance(__a, np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
_lowerCAmelCase : Any = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
_lowerCAmelCase : List[str] = [np.asarray(__a)]
# convert to mel spectrogram, truncate and pad if needed.
_lowerCAmelCase : Any = [
self._get_input_mel(__a, max_length if max_length else self.nb_max_samples, __a, __a)
for waveform in raw_speech
]
_lowerCAmelCase : List[Any] = []
_lowerCAmelCase : List[Any] = []
for mel, longer in padded_inputs:
input_mel.append(__a)
is_longer.append(__a)
if truncation == "fusion" and sum(__a) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_lowerCAmelCase : Tuple = np.random.randint(0, len(__a))
_lowerCAmelCase : Tuple = True
if isinstance(input_mel[0], __a):
_lowerCAmelCase : Tuple = [np.asarray(__a, dtype=np.floataa) for feature in input_mel]
# is_longer is a list of bool
_lowerCAmelCase : str = [[longer] for longer in is_longer]
_lowerCAmelCase : str = {"input_features": input_mel, "is_longer": is_longer}
_lowerCAmelCase : Optional[int] = BatchFeature(__a)
if return_tensors is not None:
_lowerCAmelCase : Optional[int] = input_features.convert_to_tensors(__a)
return input_features
| 658 |
from __future__ import annotations
from collections.abc import MutableSequence
class UpperCAmelCase_ :
def __init__( self, __a, __a):
'''simple docstring'''
if len(__a) != degree + 1:
raise ValueError(
"The number of coefficients should be equal to the degree + 1.")
_lowerCAmelCase : list[float] = list(__a)
_lowerCAmelCase : Any = degree
def __add__( self, __a):
'''simple docstring'''
if self.degree > polynomial_a.degree:
_lowerCAmelCase : Dict = self.coefficients[:]
for i in range(polynomial_a.degree + 1):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree, __a)
else:
_lowerCAmelCase : Union[str, Any] = polynomial_a.coefficients[:]
for i in range(self.degree + 1):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree, __a)
def __sub__( self, __a):
'''simple docstring'''
return self + polynomial_a * Polynomial(0, [-1])
def __neg__( self):
'''simple docstring'''
return Polynomial(self.degree, [-c for c in self.coefficients])
def __mul__( self, __a):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1):
for j in range(polynomial_a.degree + 1):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree, __a)
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : int | float = 0
for i in range(self.degree + 1):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self):
'''simple docstring'''
_lowerCAmelCase : List[str] = ""
for i in range(self.degree, -1, -1):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i]))
elif i == 1:
polynomial += str(abs(self.coefficients[i])) + "x"
else:
polynomial += str(abs(self.coefficients[i])) + "x^" + str(__a)
return polynomial
def __repr__( self):
'''simple docstring'''
return self.__str__()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * self.degree
for i in range(self.degree):
_lowerCAmelCase : List[Any] = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1, __a)
def snake_case__ ( self, __a = 0):
'''simple docstring'''
_lowerCAmelCase : list[float] = [0] * (self.degree + 2)
_lowerCAmelCase : Optional[Any] = constant
for i in range(self.degree + 1):
_lowerCAmelCase : Dict = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1, __a)
def __eq__( self, __a):
'''simple docstring'''
if not isinstance(__a, __a):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self, __a):
'''simple docstring'''
return not self.__eq__(__a)
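# Worked example of the arithmetic the class above implements (comment added for
# illustration; coefficients are stored lowest power first, so index i holds the
# coefficient of x**i):
#     3x^2 + 1  ->  degree=2, coefficients=[1, 0, 3]
#     x + 2     ->  degree=1, coefficients=[2, 1]
#     sum         -> [3, 1, 3]      i.e. 3x^2 + 1x + 3
#     product     -> [2, 1, 6, 3]   i.e. 3x^3 + 6x^2 + 1x + 2
#     derivative of 3x^2 + 1 -> [0, 6]        i.e. 6x
#     integral   of 3x^2 + 1 -> [C, 1, 0, 1]  i.e. x^3 + 1x + C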
| 658 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_snake_case = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 658 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/config.json",
"xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'xlnet'
lowerCamelCase__ = ['mems']
lowerCamelCase__ = {
'n_token': 'vocab_size', # Backward compatibility
'hidden_size': 'd_model',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self, __a=3_2000, __a=1024, __a=24, __a=16, __a=4096, __a="gelu", __a=True, __a="bi", __a=0.02, __a=1E-12, __a=0.1, __a=512, __a=None, __a=True, __a=False, __a=False, __a=-1, __a=False, __a="last", __a=True, __a="tanh", __a=0.1, __a=5, __a=5, __a=5, __a=1, __a=2, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = vocab_size
_lowerCAmelCase : Optional[int] = d_model
_lowerCAmelCase : Tuple = n_layer
_lowerCAmelCase : List[Any] = n_head
if d_model % n_head != 0:
raise ValueError(f"'d_model % n_head' ({d_model % n_head}) should be equal to 0")
if "d_head" in kwargs:
if kwargs["d_head"] != d_model // n_head:
raise ValueError(
f"`d_head` ({kwargs['d_head']}) should be equal to `d_model // n_head` ({d_model // n_head})")
_lowerCAmelCase : Optional[int] = d_model // n_head
_lowerCAmelCase : List[str] = ff_activation
_lowerCAmelCase : Tuple = d_inner
_lowerCAmelCase : List[Any] = untie_r
_lowerCAmelCase : List[str] = attn_type
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Any = layer_norm_eps
_lowerCAmelCase : List[Any] = dropout
_lowerCAmelCase : Optional[int] = mem_len
_lowerCAmelCase : Union[str, Any] = reuse_len
_lowerCAmelCase : List[str] = bi_data
_lowerCAmelCase : List[str] = clamp_len
_lowerCAmelCase : Any = same_length
_lowerCAmelCase : List[str] = summary_type
_lowerCAmelCase : int = summary_use_proj
_lowerCAmelCase : Optional[Any] = summary_activation
_lowerCAmelCase : Tuple = summary_last_dropout
_lowerCAmelCase : Union[str, Any] = start_n_top
_lowerCAmelCase : Optional[int] = end_n_top
_lowerCAmelCase : Tuple = bos_token_id
_lowerCAmelCase : List[Any] = pad_token_id
_lowerCAmelCase : Dict = eos_token_id
if "use_cache" in kwargs:
warnings.warn(
"The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems_eval`"
" instead.", __a, )
_lowerCAmelCase : Union[str, Any] = kwargs["use_cache"]
_lowerCAmelCase : Union[str, Any] = use_mems_eval
_lowerCAmelCase : Any = use_mems_train
super().__init__(pad_token_id=__a, bos_token_id=__a, eos_token_id=__a, **__a)
@property
def snake_case__ ( self):
'''simple docstring'''
logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
return -1
@max_position_embeddings.setter
def snake_case__ ( self, __a):
'''simple docstring'''
raise NotImplementedError(
f"The model {self.model_type} is one of the few models that has no sequence length limit.")
| 658 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase_ ( a , a , unittest.TestCase):
lowerCamelCase__ = StableDiffusionSAGPipeline
lowerCamelCase__ = TEXT_TO_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCamelCase__ = False
def snake_case__ ( self):
'''simple docstring'''
torch.manual_seed(0)
_lowerCAmelCase : int = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
_lowerCAmelCase : Any = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=__a, set_alpha_to_one=__a, )
torch.manual_seed(0)
_lowerCAmelCase : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
torch.manual_seed(0)
_lowerCAmelCase : Any = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
_lowerCAmelCase : List[str] = CLIPTextModel(__a)
_lowerCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
_lowerCAmelCase : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def snake_case__ ( self, __a, __a=0):
'''simple docstring'''
if str(__a).startswith("mps"):
_lowerCAmelCase : Any = torch.manual_seed(__a)
else:
_lowerCAmelCase : int = torch.Generator(device=__a).manual_seed(__a)
_lowerCAmelCase : Tuple = {
"prompt": ".",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 1.0,
"sag_scale": 1.0,
"output_type": "numpy",
}
return inputs
def snake_case__ ( self):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3)
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase):
def snake_case__ ( self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
_lowerCAmelCase : str = sag_pipe.to(__a)
sag_pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Any = "."
_lowerCAmelCase : Tuple = torch.manual_seed(0)
_lowerCAmelCase : List[Any] = sag_pipe(
[prompt], generator=__a, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
_lowerCAmelCase : Any = output.images
_lowerCAmelCase : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : List[str] = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Tuple = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
_lowerCAmelCase : List[Any] = sag_pipe.to(__a)
sag_pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : List[str] = "."
_lowerCAmelCase : List[str] = torch.manual_seed(0)
_lowerCAmelCase : Optional[int] = sag_pipe(
[prompt], generator=__a, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np")
_lowerCAmelCase : List[Any] = output.images
_lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
_lowerCAmelCase : Any = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371])
assert np.abs(image_slice.flatten() - expected_slice).max() < 5E-2
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Dict = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
_lowerCAmelCase : int = sag_pipe.to(__a)
sag_pipe.set_progress_bar_config(disable=__a)
_lowerCAmelCase : Optional[Any] = "."
_lowerCAmelCase : Union[str, Any] = torch.manual_seed(0)
_lowerCAmelCase : List[str] = sag_pipe(
[prompt], width=768, height=512, generator=__a, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", )
_lowerCAmelCase : Any = output.images
assert image.shape == (1, 512, 768, 3)
| 658 |
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(100, 0.25) = }''')
print(f'''{price_plus_tax(125.50, 0.05) = }''')
| 658 | 1 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( a):
lowerCamelCase__ = ['image_processor', 'tokenizer']
lowerCamelCase__ = 'BridgeTowerImageProcessor'
lowerCamelCase__ = ('RobertaTokenizer', 'RobertaTokenizerFast')
def __init__( self, __a, __a):
'''simple docstring'''
super().__init__(__a, __a)
def __call__( self, __a, __a = None, __a = True, __a = False, __a = None, __a = None, __a = 0, __a = None, __a = None, __a = None, __a = False, __a = False, __a = False, __a = False, __a = True, __a = None, **__a, ):
'''simple docstring'''
_lowerCAmelCase : int = self.tokenizer(
text=__a, add_special_tokens=__a, padding=__a, truncation=__a, max_length=__a, stride=__a, pad_to_multiple_of=__a, return_token_type_ids=__a, return_attention_mask=__a, return_overflowing_tokens=__a, return_special_tokens_mask=__a, return_offsets_mapping=__a, return_length=__a, verbose=__a, return_tensors=__a, **__a, )
# add pixel_values + pixel_mask
_lowerCAmelCase : Optional[Any] = self.image_processor(
__a, return_tensors=__a, do_normalize=__a, do_center_crop=__a, **__a)
encoding.update(__a)
return encoding
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
return self.tokenizer.batch_decode(*__a, **__a)
def snake_case__ ( self, *__a, **__a):
'''simple docstring'''
return self.tokenizer.decode(*__a, **__a)
@property
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = self.tokenizer.model_input_names
_lowerCAmelCase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 658 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'upernet'
def __init__( self, __a=None, __a=512, __a=0.02, __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=384, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
_lowerCAmelCase : List[str] = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
elif isinstance(__a, __a):
_lowerCAmelCase : List[Any] = backbone_config.get("model_type")
_lowerCAmelCase : Dict = CONFIG_MAPPING[backbone_model_type]
_lowerCAmelCase : Optional[Any] = config_class.from_dict(__a)
_lowerCAmelCase : Tuple = backbone_config
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : str = pool_scales
_lowerCAmelCase : List[str] = use_auxiliary_head
_lowerCAmelCase : Dict = auxiliary_loss_weight
_lowerCAmelCase : Tuple = auxiliary_in_channels
_lowerCAmelCase : Optional[Any] = auxiliary_channels
_lowerCAmelCase : str = auxiliary_num_convs
_lowerCAmelCase : Union[str, Any] = auxiliary_concat_input
_lowerCAmelCase : Dict = loss_ignore_index
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Any = copy.deepcopy(self.__dict__)
_lowerCAmelCase : List[Any] = self.backbone_config.to_dict()
_lowerCAmelCase : Optional[Any] = self.__class__.model_type
return output
| 658 | 1 |
import math
import random
def A ( _lowerCamelCase , _lowerCamelCase = False ):
'''simple docstring'''
if deriv:
return value * (1 - value)
    return 1 / (1 + math.exp(-value ))
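# Note on the derivative branch above (comment added for clarity): it expects the
# *already activated* value, i.e. for a = sigmoid(x) it returns a * (1 - a), which
# equals sigmoid'(x). Example: sigmoid(0) = 0.5, and feeding 0.5 back with the
# boolean flag set yields 0.5 * (1 - 0.5) = 0.25, exactly sigmoid'(0). The training
# loop below relies on this convention.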
# Initial Value
_snake_case = 0.02
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = float(2 * (random.randint(1 , 100 )) - 1 )
for _ in range(_lowerCamelCase ):
# Forward propagation
_lowerCAmelCase : Optional[int] = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
_lowerCAmelCase : Dict = (expected / 100) - layer_a
# Error delta
_lowerCAmelCase : List[str] = layer_1_error * sigmoid_function(_lowerCamelCase , _lowerCamelCase )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 100
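# Sketch of the math the loop above performs (added for reference): this is
# single-weight gradient descent. With prediction y = sigmoid(x * w) for the fixed
# input x = INITIAL_VALUE, the error e = expected / 100 - y gives the delta
#     delta = e * y * (1 - y)
# and the update w <- w + x * delta nudges the prediction toward the target on
# every propagation.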
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = int(input("Expected value: "))
_snake_case = int(input("Number of propagations: "))
print(forward_propagation(expected, number_propagations))
| 658 |
import base64
def A ( _lowerCamelCase ):
'''simple docstring'''
    return base64.a85encode(string.encode("utf-8" ) )
def A ( _lowerCamelCase ):
'''simple docstring'''
    return base64.a85decode(_lowerCamelCase ).decode("utf-8" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 658 | 1 |
from math import sqrt
def A ( _lowerCamelCase ):
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(_lowerCamelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
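# Why testing only 6k +/- 1 suffices (added for clarity): every integer can be
# written as 6k + r with r in {0, 1, 2, 3, 4, 5}; 6k, 6k + 2 and 6k + 4 are even,
# and 6k + 3 is divisible by 3, so any prime greater than 3 must have the form
# 6k + 1 or 6k + 5 (equivalently 6k - 1). Hence the loop above checks 5 and 7,
# then 11 and 13, and so on, stepping in increments of 6.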
def A ( _lowerCamelCase = 10_001 ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : int = 1
while count != nth and number < 3:
number += 1
if is_prime(_lowerCamelCase ):
count += 1
while count != nth:
number += 2
if is_prime(_lowerCamelCase ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''')
| 658 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'data2vec-vision'
def __init__( self, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.0, __a=0.0, __a=0.02, __a=1E-12, __a=224, __a=16, __a=3, __a=False, __a=False, __a=False, __a=False, __a=0.1, __a=0.1, __a=True, __a=[3, 5, 7, 11], __a=[1, 2, 3, 6], __a=True, __a=0.4, __a=256, __a=1, __a=False, __a=255, **__a, ):
'''simple docstring'''
super().__init__(**__a)
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : str = intermediate_size
_lowerCAmelCase : Optional[Any] = hidden_act
_lowerCAmelCase : int = hidden_dropout_prob
_lowerCAmelCase : Dict = attention_probs_dropout_prob
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = layer_norm_eps
_lowerCAmelCase : Optional[int] = image_size
_lowerCAmelCase : List[Any] = patch_size
_lowerCAmelCase : Optional[Any] = num_channels
_lowerCAmelCase : str = use_mask_token
_lowerCAmelCase : List[str] = use_absolute_position_embeddings
_lowerCAmelCase : str = use_relative_position_bias
_lowerCAmelCase : List[str] = use_shared_relative_position_bias
_lowerCAmelCase : List[str] = layer_scale_init_value
_lowerCAmelCase : List[Any] = drop_path_rate
_lowerCAmelCase : Union[str, Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
_lowerCAmelCase : Tuple = out_indices
_lowerCAmelCase : Tuple = pool_scales
# auxiliary head attributes (semantic segmentation)
_lowerCAmelCase : Optional[int] = use_auxiliary_head
_lowerCAmelCase : Optional[Any] = auxiliary_loss_weight
_lowerCAmelCase : int = auxiliary_channels
_lowerCAmelCase : Optional[Any] = auxiliary_num_convs
_lowerCAmelCase : int = auxiliary_concat_input
_lowerCAmelCase : Dict = semantic_loss_ignore_index
class UpperCAmelCase_ ( a):
lowerCamelCase__ = version.parse('1.11')
@property
def snake_case__ ( self):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def snake_case__ ( self):
'''simple docstring'''
return 1E-4
| 658 | 1 |
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_snake_case = logging.get_logger(__name__) # pylint: disable=invalid-name
_snake_case = "\n Examples:\n ```py\n >>> import torch\n >>> import numpy as np\n\n >>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline\n >>> from transformers import pipeline\n >>> from diffusers.utils import load_image\n\n\n >>> def make_hint(image, depth_estimator):\n ... image = depth_estimator(image)[\"depth\"]\n ... image = np.array(image)\n ... image = image[:, :, None]\n ... image = np.concatenate([image, image, image], axis=2)\n ... detected_map = torch.from_numpy(image).float() / 255.0\n ... hint = detected_map.permute(2, 0, 1)\n ... return hint\n\n\n >>> depth_estimator = pipeline(\"depth-estimation\")\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16\n ... )\n >>> pipe_prior = pipe_prior.to(\"cuda\")\n\n >>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(\n ... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16\n ... )\n >>> pipe = pipe.to(\"cuda\")\n\n\n >>> img = load_image(\n ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"\n ... \"/kandinsky/cat.png\"\n ... ).resize((768, 768))\n\n >>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")\n\n >>> prompt = \"A robot, 4k photo\"\n >>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"\n\n >>> generator = torch.Generator(device=\"cuda\").manual_seed(43)\n\n >>> image_emb, zero_image_emb = pipe_prior(\n ... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator\n ... ).to_tuple()\n\n >>> images = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... hint=hint,\n ... num_inference_steps=50,\n ... generator=generator,\n ... height=768,\n ... width=768,\n ... ).images\n\n >>> images[0].save(\"robot_cat.png\")\n ```\n"
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=8 ):
'''simple docstring'''
_lowerCAmelCase : Tuple = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
_lowerCAmelCase : List[Any] = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
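# Worked example (illustrative): assuming scale_factor = 8, a requested 512 x 512
# image maps to ceil(512 / 8**2) * 8 = 64 x 64, which is the latent resolution the
# pipeline allocates below; the MoVQ decoder then upsamples it by a factor of 8
# back to 512 x 512.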
class UpperCAmelCase_ ( a):
def __init__( self, __a, __a, __a, ):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__a, scheduler=__a, movq=__a, )
_lowerCAmelCase : List[str] = 2 ** (len(self.movq.config.block_out_channels) - 1)
def snake_case__ ( self, __a, __a, __a, __a, __a, __a):
'''simple docstring'''
if latents is None:
_lowerCAmelCase : int = randn_tensor(__a, generator=__a, device=__a, dtype=__a)
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
_lowerCAmelCase : List[Any] = latents.to(__a)
_lowerCAmelCase : List[Any] = latents * scheduler.init_noise_sigma
return latents
def snake_case__ ( self, __a=0):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`")
_lowerCAmelCase : List[Any] = torch.device(f"cuda:{gpu_id}")
_lowerCAmelCase : str = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__a, __a)
def snake_case__ ( self, __a=0):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
_lowerCAmelCase : Optional[Any] = torch.device(f"cuda:{gpu_id}")
if self.device.type != "cpu":
self.to("cpu", silence_dtype_warnings=__a)
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : Any = cpu_offload_with_hook(__a, __a, prev_module_hook=__a)
# We'll offload the last model manually.
_lowerCAmelCase : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def snake_case__ ( self):
'''simple docstring'''
if not hasattr(self.unet, "_hf_hook"):
return self.device
for module in self.unet.modules():
if (
hasattr(__a, "_hf_hook")
and hasattr(module._hf_hook, "execution_device")
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device)
return self.device
@torch.no_grad()
@replace_example_docstring(__a)
def __call__( self, __a, __a, __a, __a = 512, __a = 512, __a = 100, __a = 4.0, __a = 1, __a = None, __a = None, __a = "pil", __a = True, ):
'''simple docstring'''
_lowerCAmelCase : List[str] = self._execution_device
_lowerCAmelCase : int = guidance_scale > 1.0
if isinstance(__a, __a):
_lowerCAmelCase : int = torch.cat(__a, dim=0)
if isinstance(__a, __a):
_lowerCAmelCase : Any = torch.cat(__a, dim=0)
if isinstance(__a, __a):
_lowerCAmelCase : Tuple = torch.cat(__a, dim=0)
_lowerCAmelCase : Any = image_embeds.shape[0] * num_images_per_prompt
if do_classifier_free_guidance:
_lowerCAmelCase : Dict = image_embeds.repeat_interleave(__a, dim=0)
_lowerCAmelCase : int = negative_image_embeds.repeat_interleave(__a, dim=0)
_lowerCAmelCase : Optional[int] = hint.repeat_interleave(__a, dim=0)
_lowerCAmelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds], dim=0).to(dtype=self.unet.dtype, device=__a)
_lowerCAmelCase : str = torch.cat([hint, hint], dim=0).to(dtype=self.unet.dtype, device=__a)
self.scheduler.set_timesteps(__a, device=__a)
_lowerCAmelCase : Optional[Any] = self.scheduler.timesteps
_lowerCAmelCase : Tuple = self.movq.config.latent_channels
_lowerCAmelCase , _lowerCAmelCase : Any = downscale_height_and_width(__a, __a, self.movq_scale_factor)
# create initial latent
_lowerCAmelCase : Dict = self.prepare_latents(
(batch_size, num_channels_latents, height, width), image_embeds.dtype, __a, __a, __a, self.scheduler, )
for i, t in enumerate(self.progress_bar(__a)):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : List[str] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
_lowerCAmelCase : Optional[int] = {"image_embeds": image_embeds, "hint": hint}
_lowerCAmelCase : List[Any] = self.unet(
sample=__a, timestep=__a, encoder_hidden_states=__a, added_cond_kwargs=__a, return_dict=__a, )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Dict = noise_pred.split(latents.shape[1], dim=1)
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = noise_pred.chunk(2)
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = variance_pred.chunk(2)
_lowerCAmelCase : Union[str, Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : Union[str, Any] = torch.cat([noise_pred, variance_pred_text], dim=1)
if not (
hasattr(self.scheduler.config, "variance_type")
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : Any = noise_pred.split(latents.shape[1], dim=1)
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : Tuple = self.scheduler.step(
__a, __a, __a, generator=__a, )[0]
# post-processing
_lowerCAmelCase : List[str] = self.movq.decode(__a, force_not_quantize=__a)["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")
if output_type in ["np", "pil"]:
_lowerCAmelCase : Tuple = image * 0.5 + 0.5
_lowerCAmelCase : int = image.clamp(0, 1)
_lowerCAmelCase : int = image.cpu().permute(0, 2, 3, 1).float().numpy()
if output_type == "pil":
_lowerCAmelCase : Any = self.numpy_to_pil(__a)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__a)
| 658 |
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_snake_case = {
"cola": 2,
"mnli": 3,
"mrpc": 2,
"sst-2": 2,
"sts-b": 1,
"qqp": 2,
"qnli": 2,
"rte": 2,
"wnli": 2,
}
logging.set_verbosity_info()
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = XLNetConfig.from_json_file(_lowerCamelCase )
_lowerCAmelCase : Any = finetuning_task.lower() if finetuning_task is not None else ""
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(F"Building PyTorch XLNetForSequenceClassification model from configuration: {config}" )
_lowerCAmelCase : Any = finetuning_task
_lowerCAmelCase : Any = GLUE_TASKS_NUM_LABELS[finetuning_task]
_lowerCAmelCase : Union[str, Any] = XLNetForSequenceClassification(_lowerCamelCase )
elif "squad" in finetuning_task:
_lowerCAmelCase : Union[str, Any] = finetuning_task
_lowerCAmelCase : Any = XLNetForQuestionAnswering(_lowerCamelCase )
else:
_lowerCAmelCase : Union[str, Any] = XLNetLMHeadModel(_lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# Save pytorch-model
_lowerCAmelCase : Optional[int] = os.path.join(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase : Dict = os.path.join(_lowerCamelCase , _lowerCamelCase )
print(F"Save PyTorch model to {os.path.abspath(_lowerCamelCase )}" )
torch.save(model.state_dict() , _lowerCamelCase )
print(F"Save configuration file to {os.path.abspath(_lowerCamelCase )}" )
with open(_lowerCamelCase , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--xlnet_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained XLNet model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--finetuning_task",
default=None,
type=str,
help="Name of a task on which the XLNet TensorFlow model was fine-tuned",
)
_snake_case = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
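# Example invocation (a sketch only: the script file name and checkpoint paths
# below are placeholders; the flags match the argparse definitions above):
# python convert_xlnet_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./xlnet_cased_L-24_H-1024_A-16/xlnet_model.ckpt \
#     --xlnet_config_file ./xlnet_cased_L-24_H-1024_A-16/xlnet_config.json \
#     --pytorch_dump_folder_path ./xlnet_pytorch \
#     --finetuning_task sts-b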
| 658 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'switch_transformers'
lowerCamelCase__ = ['past_key_values']
lowerCamelCase__ = {'hidden_size': 'd_model', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self, __a=3_2128, __a=768, __a=64, __a=2048, __a=64, __a=12, __a=3, __a=12, __a=3, __a=12, __a=8, __a=False, __a=0.01, __a="float32", __a=False, __a=32, __a=128, __a=0.1, __a=1E-6, __a=0.001, __a=0.001, __a=1.0, __a="relu", __a=True, __a=False, __a=True, __a=0, __a=1, **__a, ):
'''simple docstring'''
_lowerCAmelCase : str = vocab_size
_lowerCAmelCase : str = d_model
_lowerCAmelCase : Optional[Any] = d_kv
_lowerCAmelCase : Dict = d_ff
_lowerCAmelCase : List[str] = num_sparse_encoder_layers
_lowerCAmelCase : Tuple = num_layers
_lowerCAmelCase : int = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
_lowerCAmelCase : List[Any] = num_sparse_decoder_layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_encoder_layers > 0:
_lowerCAmelCase : Any = self.num_layers // self.num_sparse_encoder_layers
else:
_lowerCAmelCase : Optional[Any] = self.num_layers # HACK: this will create 0 sparse layers
# This tells us, each how many encoder layer we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0:
_lowerCAmelCase : Optional[Any] = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
_lowerCAmelCase : List[str] = self.num_decoder_layers # HACK: this will create 0 sparse layers
_lowerCAmelCase : Any = num_heads
_lowerCAmelCase : int = num_experts
_lowerCAmelCase : str = expert_capacity
_lowerCAmelCase : int = router_bias
_lowerCAmelCase : Optional[int] = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
_lowerCAmelCase : str = router_dtype
_lowerCAmelCase : Tuple = router_ignore_padding_tokens
_lowerCAmelCase : Dict = relative_attention_num_buckets
_lowerCAmelCase : List[Any] = relative_attention_max_distance
_lowerCAmelCase : Any = dropout_rate
_lowerCAmelCase : str = layer_norm_epsilon
_lowerCAmelCase : List[str] = initializer_factor
_lowerCAmelCase : Any = feed_forward_proj
_lowerCAmelCase : str = use_cache
_lowerCAmelCase : str = add_router_probs
_lowerCAmelCase : List[Any] = router_z_loss_coef
_lowerCAmelCase : Optional[int] = router_aux_loss_coef
_lowerCAmelCase : Optional[Any] = self.feed_forward_proj.split("-")
_lowerCAmelCase : Any = act_info[-1]
_lowerCAmelCase : List[str] = act_info[0] == "gated"
if len(__a) > 1 and act_info[0] != "gated" or len(__a) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'")
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
_lowerCAmelCase : Optional[Any] = "gelu_new"
super().__init__(
pad_token_id=__a, eos_token_id=__a, is_encoder_decoder=__a, **__a, )
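# Illustrative usage (a minimal sketch; `SwitchTransformersConfig` and
# `encoder_sparse_step` are the descriptive names this class and attribute
# carry upstream in transformers, where the identifiers are not mangled):
# config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
# config.encoder_sparse_step  # -> 4: every 4th encoder layer is a sparse MoE layer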
| 658 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
_snake_case = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
_snake_case = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
_snake_case = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class UpperCAmelCase_ ( datasets.Metric):
def snake_case__ ( self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"),
}), codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"], reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
], )
def snake_case__ ( self, __a, __a, __a=4, __a=False):
'''simple docstring'''
_lowerCAmelCase : List[str] = compute_bleu(
reference_corpus=__a, translation_corpus=__a, max_order=__a, smooth=__a)
((_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase) , (_lowerCAmelCase)) : Dict = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 658 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase_ ( a , a , a):
@register_to_config
def __init__( self, __a, __a, __a, __a, __a, __a, __a, __a, __a, __a = False, ):
'''simple docstring'''
super().__init__()
_lowerCAmelCase : Union[str, Any] = nn.Embedding(__a, __a)
_lowerCAmelCase : Optional[Any] = nn.Embedding(__a, __a)
_lowerCAmelCase : int = False
_lowerCAmelCase : List[str] = nn.Dropout(p=__a)
_lowerCAmelCase : str = TaConfig(
vocab_size=__a, d_model=__a, num_heads=__a, d_kv=__a, d_ff=__a, dropout_rate=__a, feed_forward_proj=__a, is_decoder=__a, is_encoder_decoder=__a, )
_lowerCAmelCase : Tuple = nn.ModuleList()
for lyr_num in range(__a):
_lowerCAmelCase : Union[str, Any] = TaBlock(__a)
self.encoders.append(__a)
_lowerCAmelCase : Any = TaLayerNorm(__a)
_lowerCAmelCase : Any = nn.Dropout(p=__a)
def snake_case__ ( self, __a, __a):
'''simple docstring'''
_lowerCAmelCase : str = self.token_embedder(__a)
_lowerCAmelCase : Tuple = encoder_input_tokens.shape[1]
_lowerCAmelCase : Dict = torch.arange(__a, device=encoder_input_tokens.device)
x += self.position_encoding(__a)
_lowerCAmelCase : Dict = self.dropout_pre(__a)
# inverted the attention mask
_lowerCAmelCase : Optional[Any] = encoder_input_tokens.size()
_lowerCAmelCase : Optional[Any] = self.get_extended_attention_mask(__a, __a)
for lyr in self.encoders:
_lowerCAmelCase : Tuple = lyr(__a, __a)[0]
_lowerCAmelCase : str = self.layer_norm(__a)
return self.dropout_post(__a), encoder_inputs_mask
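# Forward contract, read off the method above: given integer token ids of shape
# (batch, seq_len) and a same-shaped padding mask, the encoder returns a tuple
# (hidden_states of shape (batch, seq_len, d_model), encoder_inputs_mask).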
| 658 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase : Dict = OmegaConf.load(_lowerCamelCase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowerCamelCase ) ) )
return config
def A ( _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=None ):
'''simple docstring'''
if conf_path is None:
_lowerCAmelCase : Union[str, Any] = "./model_checkpoints/vqgan_only.yaml"
_lowerCAmelCase : Tuple = load_config(_lowerCamelCase , display=_lowerCamelCase )
_lowerCAmelCase : str = VQModel(**config.model.params )
if ckpt_path is None:
_lowerCAmelCase : Optional[int] = "./model_checkpoints/vqgan_only.pt"
_lowerCAmelCase : int = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
if ".ckpt" in ckpt_path:
_lowerCAmelCase : List[Any] = sd["state_dict"]
model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
model.to(_lowerCamelCase )
del sd
return model
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = model.encode(_lowerCamelCase )
print(F"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}" )
_lowerCAmelCase : int = model.decode(_lowerCamelCase )
return xrec
def A ( _lowerCamelCase , _lowerCamelCase=False ):
'''simple docstring'''
_lowerCAmelCase , _lowerCAmelCase : List[str] = string.rsplit("." , 1 )
if reload:
_lowerCAmelCase : Dict = importlib.import_module(_lowerCamelCase )
importlib.reload(_lowerCamelCase )
return getattr(importlib.import_module(_lowerCamelCase , package=_lowerCamelCase ) , cls )
def A ( _lowerCamelCase ):
'''simple docstring'''
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=True , _lowerCamelCase=True ):
'''simple docstring'''
_lowerCAmelCase : str = instantiate_from_config(_lowerCamelCase )
if sd is not None:
model.load_state_dict(_lowerCamelCase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if ckpt:
_lowerCAmelCase : Optional[int] = torch.load(_lowerCamelCase , map_location="cpu" )
_lowerCAmelCase : int = pl_sd["global_step"]
print(F"loaded model from global step {global_step}." )
else:
_lowerCAmelCase : Optional[int] = {"state_dict": None}
_lowerCAmelCase : Any = None
_lowerCAmelCase : Optional[int] = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=_lowerCamelCase , eval_mode=_lowerCamelCase )["model"]
return model, global_step
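# Illustrative usage (a sketch only: `load_vqgan` and `reconstruct_with_vqgan`
# stand in for the first two mangled helpers above, and the paths are the
# defaults hard-coded in the loader):
# device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# model = load_vqgan(device, conf_path="./model_checkpoints/vqgan_only.yaml",
#                    ckpt_path="./model_checkpoints/vqgan_only.pt")
# xrec = reconstruct_with_vqgan(x, model)  # x: image batch of shape (N, 3, H, W)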
| 658 | 1 |
import argparse
import collections
import os
import re
import tempfile
import pandas as pd
from datasets import Dataset
from huggingface_hub import hf_hub_download, upload_folder
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/update_metadata.py
_snake_case = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
_snake_case = direct_transformers_import(TRANSFORMERS_PATH)
# Regexes that match TF/Flax/PT model names.
_snake_case = re.compile(R"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
_snake_case = re.compile(R"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
_snake_case = re.compile(R"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Fill this with tuples (pipeline_tag, model_mapping, auto_model)
_snake_case = [
("pretraining", "MODEL_FOR_PRETRAINING_MAPPING_NAMES", "AutoModelForPreTraining"),
("feature-extraction", "MODEL_MAPPING_NAMES", "AutoModel"),
("audio-classification", "MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForAudioClassification"),
("text-generation", "MODEL_FOR_CAUSAL_LM_MAPPING_NAMES", "AutoModelForCausalLM"),
("automatic-speech-recognition", "MODEL_FOR_CTC_MAPPING_NAMES", "AutoModelForCTC"),
("image-classification", "MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForImageClassification"),
("image-segmentation", "MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES", "AutoModelForImageSegmentation"),
("fill-mask", "MODEL_FOR_MASKED_LM_MAPPING_NAMES", "AutoModelForMaskedLM"),
("object-detection", "MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES", "AutoModelForObjectDetection"),
(
"zero-shot-object-detection",
"MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING_NAMES",
"AutoModelForZeroShotObjectDetection",
),
("question-answering", "MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES", "AutoModelForQuestionAnswering"),
("text2text-generation", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES", "AutoModelForSeq2SeqLM"),
("text-classification", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES", "AutoModelForSequenceClassification"),
("automatic-speech-recognition", "MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES", "AutoModelForSpeechSeq2Seq"),
(
"table-question-answering",
"MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForTableQuestionAnswering",
),
("token-classification", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES", "AutoModelForTokenClassification"),
("multiple-choice", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES", "AutoModelForMultipleChoice"),
(
"next-sentence-prediction",
"MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES",
"AutoModelForNextSentencePrediction",
),
(
"audio-frame-classification",
"MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForAudioFrameClassification",
),
("audio-xvector", "MODEL_FOR_AUDIO_XVECTOR_MAPPING_NAMES", "AutoModelForAudioXVector"),
(
"document-question-answering",
"MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForDocumentQuestionAnswering",
),
(
"visual-question-answering",
"MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES",
"AutoModelForVisualQuestionAnswering",
),
("image-to-text", "MODEL_FOR_FOR_VISION_2_SEQ_MAPPING_NAMES", "AutoModelForVision2Seq"),
(
"zero-shot-image-classification",
"MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING_NAMES",
"AutoModelForZeroShotImageClassification",
),
("depth-estimation", "MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES", "AutoModelForDepthEstimation"),
("video-classification", "MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES", "AutoModelForVideoClassification"),
("mask-generation", "MODEL_FOR_MASK_GENERATION_MAPPING_NAMES", "AutoModelForMaskGeneration"),
]
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , _lowerCamelCase )
return [m.group(0 ) for m in matches]
def A ( ):
'''simple docstring'''
_lowerCAmelCase : str = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_lowerCAmelCase : Tuple = {
config.replace("Config" , "" ): model_type for model_type, config in config_maping_names.items()
}
# Dictionaries flagging if each model prefix has a backend in PT/TF/Flax.
_lowerCAmelCase : List[str] = collections.defaultdict(_lowerCamelCase )
_lowerCAmelCase : List[str] = collections.defaultdict(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = collections.defaultdict(_lowerCamelCase )
# Let's lookup through all transformers object (once) and find if models are supported by a given backend.
for attr_name in dir(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = None
if _re_tf_models.match(_lowerCamelCase ) is not None:
_lowerCAmelCase : Dict = tf_models
_lowerCAmelCase : Optional[int] = _re_tf_models.match(_lowerCamelCase ).groups()[0]
elif _re_flax_models.match(_lowerCamelCase ) is not None:
_lowerCAmelCase : Union[str, Any] = flax_models
_lowerCAmelCase : Union[str, Any] = _re_flax_models.match(_lowerCamelCase ).groups()[0]
elif _re_pt_models.match(_lowerCamelCase ) is not None:
_lowerCAmelCase : str = pt_models
_lowerCAmelCase : List[str] = _re_pt_models.match(_lowerCamelCase ).groups()[0]
if lookup_dict is not None:
while len(_lowerCamelCase ) > 0:
if attr_name in model_prefix_to_model_type:
_lowerCAmelCase : Optional[Any] = True
break
# Try again after removing the last word in the name
_lowerCAmelCase : Tuple = "".join(camel_case_split(_lowerCamelCase )[:-1] )
_lowerCAmelCase : Union[str, Any] = set(list(pt_models.keys() ) + list(tf_models.keys() ) + list(flax_models.keys() ) )
_lowerCAmelCase : List[Any] = list(_lowerCamelCase )
all_models.sort()
_lowerCAmelCase : List[str] = {"model_type": all_models}
_lowerCAmelCase : Tuple = [pt_models[t] for t in all_models]
_lowerCAmelCase : Optional[int] = [tf_models[t] for t in all_models]
_lowerCAmelCase : Dict = [flax_models[t] for t in all_models]
# Now let's use the auto-mapping names to make sure
_lowerCAmelCase : Optional[int] = {}
for t in all_models:
if t in transformers_module.models.auto.processing_auto.PROCESSOR_MAPPING_NAMES:
_lowerCAmelCase : Optional[int] = "AutoProcessor"
elif t in transformers_module.models.auto.tokenization_auto.TOKENIZER_MAPPING_NAMES:
_lowerCAmelCase : Dict = "AutoTokenizer"
elif t in transformers_module.models.auto.feature_extraction_auto.FEATURE_EXTRACTOR_MAPPING_NAMES:
_lowerCAmelCase : List[Any] = "AutoFeatureExtractor"
else:
# Default to AutoTokenizer if a model has nothing, for backward compatibility.
_lowerCAmelCase : Tuple = "AutoTokenizer"
_lowerCAmelCase : Union[str, Any] = [processors[t] for t in all_models]
return pd.DataFrame(_lowerCamelCase )
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = [
transformers_module.models.auto.modeling_auto,
transformers_module.models.auto.modeling_tf_auto,
transformers_module.models.auto.modeling_flax_auto,
]
for pipeline_tag, model_mapping, auto_class in PIPELINE_TAGS_AND_AUTO_MODELS:
_lowerCAmelCase : str = [model_mapping, F"TF_{model_mapping}", F"FLAX_{model_mapping}"]
        # Auto class names carry no underscore after the framework prefix (TFAutoModel..., FlaxAutoModel...)
        _lowerCAmelCase : Dict = [auto_class, F"TF{auto_class}", F"Flax{auto_class}"]
# Loop through all three frameworks
for module, cls, mapping in zip(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
# The type of pipeline may not exist in this framework
if not hasattr(_lowerCamelCase , _lowerCamelCase ):
continue
# First extract all model_names
_lowerCAmelCase : int = []
for name in getattr(_lowerCamelCase , _lowerCamelCase ).values():
if isinstance(_lowerCamelCase , _lowerCamelCase ):
model_names.append(_lowerCamelCase )
else:
model_names.extend(list(_lowerCamelCase ) )
# Add pipeline tag and auto model class for those models
table.update({model_name: (pipeline_tag, cls) for model_name in model_names} )
return table
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = get_frameworks_table()
_lowerCAmelCase : Union[str, Any] = Dataset.from_pandas(_lowerCamelCase )
_lowerCAmelCase : int = hf_hub_download(
"huggingface/transformers-metadata" , "pipeline_tags.json" , repo_type="dataset" , token=_lowerCamelCase )
_lowerCAmelCase : int = Dataset.from_json(_lowerCamelCase )
_lowerCAmelCase : Tuple = {
tags_dataset[i]["model_class"]: (tags_dataset[i]["pipeline_tag"], tags_dataset[i]["auto_class"])
for i in range(len(_lowerCamelCase ) )
}
_lowerCAmelCase : Tuple = update_pipeline_and_auto_class_table(_lowerCamelCase )
# Sort the model classes to avoid some nondeterministic updates to create false update commits.
_lowerCAmelCase : Optional[Any] = sorted(table.keys() )
_lowerCAmelCase : str = pd.DataFrame(
{
"model_class": model_classes,
"pipeline_tag": [table[m][0] for m in model_classes],
"auto_class": [table[m][1] for m in model_classes],
} )
_lowerCAmelCase : int = Dataset.from_pandas(_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmp_dir:
frameworks_dataset.to_json(os.path.join(_lowerCamelCase , "frameworks.json" ) )
tags_dataset.to_json(os.path.join(_lowerCamelCase , "pipeline_tags.json" ) )
if commit_sha is not None:
_lowerCAmelCase : Optional[Any] = (
F"Update with commit {commit_sha}\n\nSee: "
F"https://github.com/huggingface/transformers/commit/{commit_sha}"
)
else:
_lowerCAmelCase : Tuple = "Update"
upload_folder(
repo_id="huggingface/transformers-metadata" , folder_path=_lowerCamelCase , repo_type="dataset" , token=_lowerCamelCase , commit_message=_lowerCamelCase , )
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Any = {tag: cls for tag, _, cls in PIPELINE_TAGS_AND_AUTO_MODELS}
_lowerCAmelCase : Dict = transformers_module.pipelines.SUPPORTED_TASKS
_lowerCAmelCase : List[Any] = []
for key in pipeline_tasks:
if key not in in_table:
_lowerCAmelCase : Any = pipeline_tasks[key]["pt"]
if isinstance(_lowerCamelCase , (list, tuple) ):
_lowerCAmelCase : Optional[Any] = model[0]
_lowerCAmelCase : List[str] = model.__name__
if model not in in_table.values():
missing.append(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
_lowerCAmelCase : str = ", ".join(_lowerCamelCase )
raise ValueError(
"The following pipeline tags are not present in the `PIPELINE_TAGS_AND_AUTO_MODELS` constant inside "
F"`utils/update_metadata.py`: {msg}. Please add them!" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument("--token", type=str, help="The token to use to push to the transformers-metadata dataset.")
parser.add_argument("--commit_sha", type=str, help="The sha of the commit going with this update.")
parser.add_argument("--check-only", action="store_true", help="Activate to just check all pipelines are present.")
_snake_case = parser.parse_args()
if args.check_only:
check_pipeline_tags()
else:
update_metadata(args.token, args.commit_sha)
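# Example invocations (run from the repo root; the token and sha are
# placeholders, the flags match the argparse definitions above):
# python utils/update_metadata.py --check-only
# python utils/update_metadata.py --token hf_xxx --commit_sha 0123abc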
| 658 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class UpperCAmelCase_ ( a):
lowerCamelCase__ = 'roc_bert'
def __init__( self, __a=3_0522, __a=768, __a=12, __a=12, __a=3072, __a="gelu", __a=0.1, __a=0.1, __a=512, __a=2, __a=0.02, __a=1E-12, __a=True, __a=0, __a="absolute", __a=None, __a=True, __a=True, __a=768, __a=910, __a=512, __a=2_4858, __a=True, **__a, ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = vocab_size
_lowerCAmelCase : Optional[Any] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : str = num_hidden_layers
_lowerCAmelCase : List[Any] = num_attention_heads
_lowerCAmelCase : int = intermediate_size
_lowerCAmelCase : Optional[int] = hidden_act
_lowerCAmelCase : Optional[Any] = hidden_dropout_prob
_lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
_lowerCAmelCase : Union[str, Any] = initializer_range
_lowerCAmelCase : Optional[Any] = type_vocab_size
_lowerCAmelCase : int = layer_norm_eps
_lowerCAmelCase : Union[str, Any] = use_cache
_lowerCAmelCase : Optional[int] = enable_pronunciation
_lowerCAmelCase : Dict = enable_shape
_lowerCAmelCase : Optional[Any] = pronunciation_embed_dim
_lowerCAmelCase : Any = pronunciation_vocab_size
_lowerCAmelCase : List[str] = shape_embed_dim
_lowerCAmelCase : int = shape_vocab_size
_lowerCAmelCase : Optional[int] = concat_input
_lowerCAmelCase : Dict = position_embedding_type
_lowerCAmelCase : Tuple = classifier_dropout
super().__init__(pad_token_id=__a, **__a)
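# Illustrative usage (a minimal sketch; the class corresponds to `RoCBertConfig`
# in transformers, and the values are the signature defaults above):
# config = RoCBertConfig(enable_shape=True, enable_pronunciation=True)
# pronunciation_embed_dim, shape_embed_dim  # -> 768 and 512 by default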
| 658 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
_snake_case = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 658 |
from __future__ import annotations
def A ( _lowerCamelCase ):
'''simple docstring'''
if not nums:
raise ValueError("List is empty" )
return sum(_lowerCamelCase ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
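# Illustrative usage (`mean` stands in for the mangled helper name above):
# mean([3, 6, 9])  # -> 6.0
# mean([])         # raises ValueError("List is empty")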
| 658 | 1 |
import itertools
import string
from collections.abc import Generator, Iterable
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Tuple = iter(_lowerCamelCase )
while True:
_lowerCAmelCase : Dict = tuple(itertools.islice(_lowerCamelCase , _lowerCamelCase ) )
if not chunk:
return
yield chunk
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = "".join([c.upper() for c in dirty if c in string.ascii_letters] )
_lowerCAmelCase : Dict = ""
if len(_lowerCamelCase ) < 2:
return dirty
for i in range(len(_lowerCamelCase ) - 1 ):
clean += dirty[i]
if dirty[i] == dirty[i + 1]:
clean += "X"
clean += dirty[-1]
if len(_lowerCamelCase ) & 1:
clean += "X"
return clean
def A ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
# we're using a list instead of a '2d' array because it makes the math
# for setting up the table and doing the actual encoding/decoding simpler
_lowerCAmelCase : int = []
# copy key chars into the table if they are in `alphabet` ignoring duplicates
for char in key.upper():
if char not in table and char in alphabet:
table.append(_lowerCamelCase )
# fill the rest of the table in with the remaining alphabet chars
for char in alphabet:
if char not in table:
table.append(_lowerCamelCase )
return table
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Dict = generate_table(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = prepare_input(_lowerCamelCase )
_lowerCAmelCase : Any = ""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_lowerCamelCase , 2 ):
_lowerCAmelCase , _lowerCAmelCase : str = divmod(table.index(_lowerCamelCase ) , 5 )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = divmod(table.index(_lowerCamelCase ) , 5 )
if rowa == rowa:
ciphertext += table[rowa * 5 + (cola + 1) % 5]
ciphertext += table[rowa * 5 + (cola + 1) % 5]
elif cola == cola:
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
ciphertext += table[((rowa + 1) % 5) * 5 + cola]
else: # rectangle
ciphertext += table[rowa * 5 + cola]
ciphertext += table[rowa * 5 + cola]
return ciphertext
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = generate_table(_lowerCamelCase )
_lowerCAmelCase : Tuple = ""
# https://en.wikipedia.org/wiki/Playfair_cipher#Description
for chara, chara in chunker(_lowerCamelCase , 2 ):
_lowerCAmelCase , _lowerCAmelCase : str = divmod(table.index(_lowerCamelCase ) , 5 )
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = divmod(table.index(_lowerCamelCase ) , 5 )
if rowa == rowa:
plaintext += table[rowa * 5 + (cola - 1) % 5]
plaintext += table[rowa * 5 + (cola - 1) % 5]
elif cola == cola:
plaintext += table[((rowa - 1) % 5) * 5 + cola]
plaintext += table[((rowa - 1) % 5) * 5 + cola]
else: # rectangle
plaintext += table[rowa * 5 + cola]
plaintext += table[rowa * 5 + cola]
return plaintext
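# Illustrative round trip (a sketch: `encode`/`decode` stand in for the last two
# mangled helpers above; the key is arbitrary):
# ciphertext = encode("Hide the gold in the tree stump", "playfair example")
# decode(ciphertext, "playfair example")  # recovers the text upper-cased, with
#                                         # X padding between doubled letters
#                                         # and at odd length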
| 658 |
def A ( _lowerCamelCase ):
'''simple docstring'''
    if not isinstance(_lowerCamelCase , int ) or _lowerCamelCase <= 0:
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowerCamelCase )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
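# Expected output of the two calls above (n * (2n - 1) for n = 0..length-1):
# [0, 1, 6, 15, 28]
# [0, 1, 6, 15, 28, 45, 66, 91, 120, 153]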
| 658 | 1 |
from copy import deepcopy
class UpperCAmelCase_ :
def __init__( self, __a = None, __a = None):
'''simple docstring'''
if arr is None and size is not None:
_lowerCAmelCase : int = size
_lowerCAmelCase : Any = [0] * size
elif arr is not None:
self.init(__a)
else:
raise ValueError("Either arr or size must be specified")
def snake_case__ ( self, __a):
'''simple docstring'''
_lowerCAmelCase : Dict = len(__a)
_lowerCAmelCase : List[str] = deepcopy(__a)
for i in range(1, self.size):
_lowerCAmelCase : List[Any] = self.next_(__a)
if j < self.size:
self.tree[j] += self.tree[i]
def snake_case__ ( self):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.tree[:]
for i in range(self.size - 1, 0, -1):
_lowerCAmelCase : Union[str, Any] = self.next_(__a)
if j < self.size:
arr[j] -= arr[i]
return arr
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
return index + (index & (-index))
@staticmethod
def snake_case__ ( __a):
'''simple docstring'''
return index - (index & (-index))
def snake_case__ ( self, __a, __a):
'''simple docstring'''
if index == 0:
self.tree[0] += value
return
while index < self.size:
self.tree[index] += value
_lowerCAmelCase : List[str] = self.next_(__a)
def snake_case__ ( self, __a, __a):
'''simple docstring'''
self.add(__a, value - self.get(__a))
def snake_case__ ( self, __a):
'''simple docstring'''
if right == 0:
return 0
_lowerCAmelCase : int = self.tree[0]
right -= 1 # make right inclusive
while right > 0:
result += self.tree[right]
_lowerCAmelCase : Any = self.prev(__a)
return result
def snake_case__ ( self, __a, __a):
'''simple docstring'''
return self.prefix(__a) - self.prefix(__a)
def snake_case__ ( self, __a):
'''simple docstring'''
return self.query(__a, index + 1)
def snake_case__ ( self, __a):
'''simple docstring'''
value -= self.tree[0]
if value < 0:
return -1
_lowerCAmelCase : int = 1 # Largest power of 2 <= size
while j * 2 < self.size:
j *= 2
_lowerCAmelCase : Optional[Any] = 0
while j > 0:
if i + j < self.size and self.tree[i + j] <= value:
value -= self.tree[i + j]
i += j
j //= 2
return i
if __name__ == "__main__":
import doctest
doctest.testmod()
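# Illustrative usage (a sketch: `BinaryIndexedTree` is a hypothetical
# descriptive name for the mangled Fenwick-tree class above):
# tree = BinaryIndexedTree(arr=[1, 2, 3, 4, 5])
# tree.prefix(3)    # -> 6  (1 + 2 + 3)
# tree.query(1, 4)  # -> 9  (2 + 3 + 4)
# tree.add(2, 10)   # logically turns the array into [1, 2, 13, 4, 5]
# tree.get(2)       # -> 13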
| 658 |
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( _lowerCamelCase ):
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : Tuple = features[:, labels == i]
_lowerCAmelCase : Dict = data.mean(1 )
# Centralize the data of class i
_lowerCAmelCase : Union[str, Any] = data - column_reshape(_lowerCamelCase )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(_lowerCamelCase , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : int = np.dot(_lowerCamelCase , centered_data.T )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = features.mean(1 )
_lowerCAmelCase : List[str] = np.nan
for i in range(_lowerCamelCase ):
_lowerCAmelCase : str = features[:, labels == i]
_lowerCAmelCase : Optional[Any] = data.shape[1]
_lowerCAmelCase : Optional[Any] = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
_lowerCAmelCase : Optional[Any] = device_data * np.dot(
column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase ) , (column_reshape(_lowerCamelCase ) - column_reshape(_lowerCamelCase )).T , )
return covariance_sum / features.shape[1]
def A ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
if features.any():
_lowerCAmelCase : List[Any] = features.mean(1 )
# Center the dataset
_lowerCAmelCase : List[Any] = features - np.reshape(_lowerCamelCase , (data_mean.size, 1) )
_lowerCAmelCase : Optional[Any] = np.dot(_lowerCamelCase , centered_data.T ) / features.shape[1]
_lowerCAmelCase , _lowerCAmelCase : List[Any] = np.linalg.eigh(_lowerCamelCase )
# Take all the columns in the reverse order (-1), and then takes only the first
_lowerCAmelCase : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
_lowerCAmelCase : List[Any] = np.dot(filtered_eigenvectors.T , _lowerCamelCase )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
    if features.any():
_lowerCAmelCase , _lowerCAmelCase : List[str] = eigh(
covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase ) , )
_lowerCAmelCase : List[str] = eigenvectors[:, ::-1][:, :dimensions]
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = np.linalg.svd(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = svd_matrix[:, 0:dimensions]
_lowerCAmelCase : str = np.dot(filtered_svd_matrix.T , _lowerCamelCase )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase )
logging.error("Dataset empty" )
raise AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
_lowerCAmelCase : List[Any] = np.array([0, 0, 0, 1, 1] )
_lowerCAmelCase : List[Any] = 2
_lowerCAmelCase : Union[str, Any] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Union[str, Any] = linear_discriminant_analysis(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if isinstance(_lowerCamelCase , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
_lowerCAmelCase : List[str] = 2
_lowerCAmelCase : List[Any] = np.array([[6.92_82_03_23, 8.66_02_54_04, 10.39_23_04_85], [3.0, 3.0, 3.0]] )
with pytest.raises(_lowerCamelCase ) as error_info:
_lowerCAmelCase : Tuple = principal_component_analysis(_lowerCamelCase , _lowerCamelCase )
if not np.allclose(_lowerCamelCase , _lowerCamelCase ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
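# Illustrative usage (a sketch: `principal_component_analysis` stands in for the
# mangled PCA helper above; note that samples are stored column-wise):
# features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])  # 2 features x 3 samples
# projected = principal_component_analysis(features, 1)
# projected.shape  # -> (1, 3): each sample reduced to one principal component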
| 658 | 1 |