| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 86 to 54.5k) | int64 (0 to 371) | string (lengths 87 to 49.2k) | int64 (0 to 349) | int64 (0 or 1) |
"""Ternary search: divide-and-conquer search over a sorted sequence."""
from __future__ import annotations

# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear search over array[left:right]; return the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted list; return the index of target or -1."""
    left = 0
    right = len(array) - 1
    while left <= right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)
        # split the current window [left, right] into three parts
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search over a sorted list; return the index of target or -1."""
    if left <= right:
        if right - left < precision:
            return lin_search(left, right + 1, array, target)
        one_third = left + (right - left) // 3
        two_third = right - (right - left) // 3
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = [int(item.strip()) for item in user_input.split(',')]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input('Enter the number to be found in the list:\n').strip())
    result_ite = ite_ternary_search(collection, target)
    result_rec = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result_ite != -1:
        print(f"Iterative search: {target} found at position: {result_ite}")
        print(f"Recursive search: {target} found at position: {result_rec}")
    else:
        print('Not found')
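A quick non-interactive check of the two search variants above; a minimal sketch, assuming the functions are importable as written:

# Hedged usage sketch (not part of the original file): both variants
# should agree with list.index on a sorted input.
sorted_values = list(range(0, 100, 3))
assert ite_ternary_search(sorted_values, 30) == sorted_values.index(30)
assert rec_ternary_search(0, len(sorted_values) - 1, sorted_values, 30) == sorted_values.index(30)
assert ite_ternary_search(sorted_values, 31) == -1  # absent values return -1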
| 134 |
from __future__ import annotations

solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    """Return True if no already-placed queen attacks the square (row, column)."""
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    # upper-left diagonal
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    # upper-right diagonal
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    """Backtracking solver: place one queen per row, recording and printing every full placement."""
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0  # backtrack
    return False


def printboard(board: list[list[int]]) -> None:
    """Print the board with 'Q' for a queen and '.' for an empty square."""
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print('Q', end=' ')
            else:
                print('.', end=' ')
        print()


# n = int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("The total no. of solutions are :", len(solution))
| 127 | 0 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {'comet'}
_has_fairseq = importlib.util.find_spec('fairseq') is not None

UNSUPPORTED_ON_WINDOWS = {'code_eval'}
_on_windows = os.name == 'nt'

REQUIRE_TRANSFORMERS = {'bertscore', 'frugalscore', 'perplexity'}
_has_transformers = importlib.util.find_spec('transformers') is not None


def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None

    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper


# Metrics intensive calls patchers
# --------------------------------


@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield


@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield


@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock load_from_checkpoint which is supposed to download a model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield


def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
| 370 |
from collections.abc import Sequence


def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: return the maximum sum over contiguous subarrays of arr."""
    if not arr:
        return 0
    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)
    return max_sum


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
    print(f"{max_subarray_sum(nums) = }")
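The allow_empty_subarrays flag only changes the answer when every element is negative; a hedged sketch:

# Hedged sketch (not part of the original file).
assert max_subarray_sum([-2, 1, -3, 4, -1, 2, 1, -5, 4]) == 6           # classic Kadane answer
assert max_subarray_sum([-3, -1, -2]) == -1                             # best non-empty subarray
assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0  # the empty subarray wins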
| 81 | 0 |
"""simple docstring"""
from collections import namedtuple
_lowercase = namedtuple('''from_to''', '''from_ to''')
_lowercase = {
'''cubicmeter''': from_to(1, 1),
'''litre''': from_to(0.001, 10_00),
'''kilolitre''': from_to(1, 1),
'''gallon''': from_to(0.00_454, 264.172),
'''cubicyard''': from_to(0.76_455, 1.30_795),
'''cubicfoot''': from_to(0.028, 35.3_147),
'''cup''': from_to(0.000_236_588, 4_226.75),
}
def _snake_case ( snake_case__ : float , snake_case__ : str , snake_case__ : str ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
F'Invalid \'from_type\' value: {from_type!r} Supported values are:\n'
+ ', '.join(snake_case__ ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F'Invalid \'to_type\' value: {to_type!r}. Supported values are:\n'
+ ', '.join(snake_case__ ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod() | 74 |
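A hedged usage sketch for the volume converter above: every conversion goes through cubic meters as the base unit.

# Hedged sketch (not part of the original file).
assert convert_volume(1, "cubicmeter", "litre") == 1000
assert abs(convert_volume(5, "litre", "gallon") - 1.32086) < 1e-4        # 5 L is about 1.32 US gallons
assert abs(convert_volume(1, "cup", "cubicmeter") - 0.000236588) < 1e-9  # round-trips through the base unit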
"""simple docstring"""
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = (CMStochasticIterativeScheduler,)
lowercase__ = 10
def _UpperCAmelCase ( self : Optional[int] , **lowerCAmelCase_ : str):
"""simple docstring"""
lowercase_ = {
"""num_train_timesteps""": 2_0_1,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**lowerCAmelCase_)
return config
def _UpperCAmelCase ( self : Optional[int]):
"""simple docstring"""
lowercase_ = 1_0
lowercase_ = self.get_scheduler_config()
lowercase_ = self.scheduler_classes[0](**lowerCAmelCase_)
scheduler.set_timesteps(lowerCAmelCase_)
lowercase_ = scheduler.timesteps[0]
lowercase_ = scheduler.timesteps[1]
lowercase_ = self.dummy_sample
lowercase_ = 0.1 * sample
lowercase_ = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_).prev_sample
lowercase_ = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_).prev_sample
self.assertEqual(output_a.shape , sample.shape)
self.assertEqual(output_a.shape , output_a.shape)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=lowerCAmelCase_)
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**lowerCAmelCase_)
lowercase_ = 1
scheduler.set_timesteps(lowerCAmelCase_)
lowercase_ = scheduler.timesteps
lowercase_ = torch.manual_seed(0)
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(lowerCAmelCase_):
# 1. scale model input
lowercase_ = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_)
# 2. predict noise residual
lowercase_ = model(lowerCAmelCase_ , lowerCAmelCase_)
# 3. predict previous sample x_t-1
lowercase_ = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_).prev_sample
lowercase_ = pred_prev_sample
lowercase_ = torch.sum(torch.abs(lowerCAmelCase_))
lowercase_ = torch.mean(torch.abs(lowerCAmelCase_))
assert abs(result_sum.item() - 192.7_614) < 1E-2
assert abs(result_mean.item() - 0.2_510) < 1E-3
def _UpperCAmelCase ( self : List[Any]):
"""simple docstring"""
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**lowerCAmelCase_)
lowercase_ = [1_0_6, 0]
scheduler.set_timesteps(timesteps=lowerCAmelCase_)
lowercase_ = scheduler.timesteps
lowercase_ = torch.manual_seed(0)
lowercase_ = self.dummy_model()
lowercase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
lowercase_ = scheduler.scale_model_input(lowerCAmelCase_ , lowerCAmelCase_)
# 2. predict noise residual
lowercase_ = model(lowerCAmelCase_ , lowerCAmelCase_)
# 3. predict previous sample x_t-1
lowercase_ = scheduler.step(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , generator=lowerCAmelCase_).prev_sample
lowercase_ = pred_prev_sample
lowercase_ = torch.sum(torch.abs(lowerCAmelCase_))
lowercase_ = torch.mean(torch.abs(lowerCAmelCase_))
assert abs(result_sum.item() - 347.6_357) < 1E-2
assert abs(result_mean.item() - 0.4_527) < 1E-3
def _UpperCAmelCase ( self : Union[str, Any]):
"""simple docstring"""
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**lowerCAmelCase_)
lowercase_ = [3_9, 3_0, 1_2, 1_5, 0]
with self.assertRaises(lowerCAmelCase_ , msg="""`timesteps` must be in descending order."""):
scheduler.set_timesteps(timesteps=lowerCAmelCase_)
def _UpperCAmelCase ( self : Optional[Any]):
"""simple docstring"""
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**lowerCAmelCase_)
lowercase_ = [3_9, 3_0, 1_2, 1, 0]
lowercase_ = len(lowerCAmelCase_)
with self.assertRaises(lowerCAmelCase_ , msg="""Can only pass one of `num_inference_steps` or `timesteps`."""):
scheduler.set_timesteps(num_inference_steps=lowerCAmelCase_ , timesteps=lowerCAmelCase_)
def _UpperCAmelCase ( self : int):
"""simple docstring"""
lowercase_ = self.scheduler_classes[0]
lowercase_ = self.get_scheduler_config()
lowercase_ = scheduler_class(**lowerCAmelCase_)
lowercase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
lowerCAmelCase_ , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" , ):
scheduler.set_timesteps(timesteps=lowerCAmelCase_)
| 136 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }


@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "clusters"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_second[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_second[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass


def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    images = [image1, image2]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_slice = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_slice)

        # test batched
        encoding = image_processing(images, return_tensors="pt")
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_slice = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_slice)
| 158 |
from collections.abc import Iterable
from typing import Generic, TypeVar

_T = TypeVar("_T")


class Queue(Generic[_T]):
    """A FIFO queue implemented with two stacks, giving amortized O(1) operations."""

    def __init__(self, iterable: Iterable[_T] | None = None):
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self):
        return len(self._stack1) + len(self._stack2)

    def __repr__(self):
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # cache the bound methods to reduce attribute look-ups in the while loop
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()


if __name__ == "__main__":
    from doctest import testmod

    testmod()
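A hedged usage sketch for the two-stack queue above, showing FIFO order and the empty-queue error:

# Hedged sketch (not part of the original file).
q = Queue([1, 2, 3])
q.put(4)
assert len(q) == 4
assert [q.get(), q.get(), q.get(), q.get()] == [1, 2, 3, 4]  # FIFO order preserved
try:
    q.get()
except IndexError as err:
    assert str(err) == "Queue is empty"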
| 158 | 1 |
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-text-base": "https://huggingface.co/data2vec/resolve/main/config.json",
}


class Data2VecTextConfig(PretrainedConfig):
    model_type = "data2vec-text"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class Data2VecTextOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
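A hedged sketch of instantiating the config class defined above with non-default sizes:

# Hedged sketch (not part of the original file).
config = Data2VecTextConfig(vocab_size=30522, hidden_size=256, num_hidden_layers=4)
assert config.model_type == "data2vec-text"
assert (config.hidden_size, config.num_hidden_layers) == (256, 4)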
| 251 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 60 | 0 |
from .data_collator import (
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
    DataCollatorForSOP,
    DataCollatorForTokenClassification,
    DataCollatorForWholeWordMask,
    DataCollatorWithPadding,
    DefaultDataCollator,
    default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
    DataProcessor,
    InputExample,
    InputFeatures,
    SingleSentenceClassificationProcessor,
    SquadExample,
    SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
    glue_convert_examples_to_features,
    glue_output_modes,
    glue_processors,
    glue_tasks_num_labels,
    squad_convert_examples_to_features,
    xnli_output_modes,
    xnli_processors,
    xnli_tasks_num_labels,
)
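A hedged sketch of a typical consumer of these exports: dynamic padding at batch time with DataCollatorWithPadding (the checkpoint name is illustrative).

# Hedged sketch (not from the original file).
from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # illustrative checkpoint
collator = DataCollatorWithPadding(tokenizer=tokenizer)
features = [tokenizer("short text"), tokenizer("a somewhat longer example sentence")]
batch = collator(features)  # pads both examples to the longer length
assert batch["input_ids"].shape[0] == 2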
| 365 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_poolformer": [
        "POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "PoolFormerConfig",
        "PoolFormerOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_poolformer"] = ["PoolFormerFeatureExtractor"]
    _import_structure["image_processing_poolformer"] = ["PoolFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_poolformer"] = [
        "POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PoolFormerForImageClassification",
        "PoolFormerModel",
        "PoolFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_poolformer import (
        POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        PoolFormerConfig,
        PoolFormerOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_poolformer import PoolFormerFeatureExtractor
        from .image_processing_poolformer import PoolFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_poolformer import (
            POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            PoolFormerForImageClassification,
            PoolFormerModel,
            PoolFormerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 91 | 0 |
"""simple docstring"""
from math import factorial, pi
def __UpperCAmelCase ( lowercase ,lowercase = 30 ):
"""simple docstring"""
if not isinstance(lowercase ,(int, float) ):
raise ValueError("""maclaurin_sin() requires either an int or float for theta""" )
if not isinstance(lowercase ,lowercase ) or accuracy <= 0:
raise ValueError("""maclaurin_sin() requires a positive int for accuracy""" )
_UpperCAmelCase = float(lowercase )
_UpperCAmelCase = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(lowercase ) )
def __UpperCAmelCase ( lowercase ,lowercase = 30 ):
"""simple docstring"""
if not isinstance(lowercase ,(int, float) ):
raise ValueError("""maclaurin_cos() requires either an int or float for theta""" )
if not isinstance(lowercase ,lowercase ) or accuracy <= 0:
raise ValueError("""maclaurin_cos() requires a positive int for accuracy""" )
_UpperCAmelCase = float(lowercase )
_UpperCAmelCase = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(lowercase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(1_0))
print(maclaurin_sin(-1_0))
print(maclaurin_sin(1_0, 1_5))
print(maclaurin_sin(-1_0, 1_5))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(1_0, 1_5))
print(maclaurin_cos(-1_0, 1_5))
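After range reduction the 30-term series is far more precise than double precision needs; a hedged sketch comparing against the math module:

# Hedged sketch (not part of the original file).
from math import cos, sin

assert abs(maclaurin_sin(10) - sin(10)) < 1e-9
assert abs(maclaurin_cos(-5) - cos(-5)) < 1e-9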
| 289 | """simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}
CONFIG_MAP = {
"""b0""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.0,
"""image_size""": 2_2_4,
"""dropout_rate""": 0.2,
"""dw_padding""": [],
},
"""b1""": {
"""hidden_dim""": 1_2_8_0,
"""width_coef""": 1.0,
"""depth_coef""": 1.1,
"""image_size""": 2_4_0,
"""dropout_rate""": 0.2,
"""dw_padding""": [1_6],
},
"""b2""": {
"""hidden_dim""": 1_4_0_8,
"""width_coef""": 1.1,
"""depth_coef""": 1.2,
"""image_size""": 2_6_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 8, 1_6],
},
"""b3""": {
"""hidden_dim""": 1_5_3_6,
"""width_coef""": 1.2,
"""depth_coef""": 1.4,
"""image_size""": 3_0_0,
"""dropout_rate""": 0.3,
"""dw_padding""": [5, 1_8],
},
"""b4""": {
"""hidden_dim""": 1_7_9_2,
"""width_coef""": 1.4,
"""depth_coef""": 1.8,
"""image_size""": 3_8_0,
"""dropout_rate""": 0.4,
"""dw_padding""": [6],
},
"""b5""": {
"""hidden_dim""": 2_0_4_8,
"""width_coef""": 1.6,
"""depth_coef""": 2.2,
"""image_size""": 4_5_6,
"""dropout_rate""": 0.4,
"""dw_padding""": [1_3, 2_7],
},
"""b6""": {
"""hidden_dim""": 2_3_0_4,
"""width_coef""": 1.8,
"""depth_coef""": 2.6,
"""image_size""": 5_2_8,
"""dropout_rate""": 0.5,
"""dw_padding""": [3_1],
},
"""b7""": {
"""hidden_dim""": 2_5_6_0,
"""width_coef""": 2.0,
"""depth_coef""": 3.1,
"""image_size""": 6_0_0,
"""dropout_rate""": 0.5,
"""dw_padding""": [1_8],
},
}
def get_efficientnet_config(model_name):
    """Build an EfficientNetConfig for the given variant and attach ImageNet labels."""
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    """Download a COCO image on which we verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    """Create an EfficientNetImageProcessor matching the checkpoint's input size."""
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    """Build the mapping from original TF parameter names to HF parameter names."""
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
    rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight"))
    rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight"))
    rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias"))
    rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean"))
    rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var"))
    for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    # map the classifier head (Keras names its top dense layer "predictions")
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    """Copy the original TF parameters into the HF state dict, transposing where needed."""
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak the original model's weights into the HF EfficientNet structure."""
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        hub_model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_model_name)
        hf_model.push_to_hub(hub_model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""b0""",
type=str,
help="""Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""hf_model""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--save_model""", action="""store_true""", help="""Save model to local""")
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Push model and image processor to the hub""")
args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
| 289 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
| 306 |
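The distributed-ops checks above only exercise the multi-process paths when several processes run them; a hedged sketch of one way to launch them locally, assuming the accelerate package is installed:

# Hedged sketch (not part of the original file): spawn two local processes
# that each run main() via accelerate's notebook_launcher.
from accelerate import notebook_launcher

notebook_launcher(main, args=(), num_processes=2)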
from __future__ import annotations


def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in non-decreasing order (trial division)."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 306 | 1 |
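A hedged usage sketch for the prime factorization above; multiplying the factors back together reconstructs n:

# Hedged sketch (not part of the original file).
from math import prod

assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prod(prime_factors(360)) == 360
assert prime_factors(97) == [97]  # a prime is its own only factor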
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _A ( _a ,_a ,_a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : str = StableDiffusionInpaintPipeline
UpperCAmelCase : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
UpperCAmelCase : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
UpperCAmelCase : Union[str, Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCAmelCase : int = frozenset([] )
def __snake_case ( self : Dict):
torch.manual_seed(0)
a : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__UpperCAmelCase , )
a : Tuple = PNDMScheduler(skip_prk_steps=__UpperCAmelCase)
torch.manual_seed(0)
a : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0)
a : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
a : Any = CLIPTextModel(__UpperCAmelCase)
a : List[str] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
a : Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]=0):
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
a : Dict = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCAmelCase)).to(__UpperCAmelCase)
a : List[str] = image.cpu().permute(0 , 2 , 3 , 1)[0]
a : Union[str, Any] = Image.fromarray(np.uinta(__UpperCAmelCase)).convert("RGB").resize((64, 64))
a : Dict = Image.fromarray(np.uinta(image + 4)).convert("RGB").resize((64, 64))
if str(__UpperCAmelCase).startswith("mps"):
a : Tuple = torch.manual_seed(__UpperCAmelCase)
else:
a : Tuple = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
a : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def __snake_case ( self : List[str]):
a : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
a : Tuple = self.get_dummy_components()
a : Optional[int] = StableDiffusionInpaintPipeline(**__UpperCAmelCase)
a : int = sd_pipe.to(__UpperCAmelCase)
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a : Any = self.get_dummy_inputs(__UpperCAmelCase)
a : Optional[int] = sd_pipe(**__UpperCAmelCase).images
a : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
a : int = np.array([0.4_727, 0.5_735, 0.3_941, 0.5_446, 0.5_926, 0.4_394, 0.5_062, 0.4_654, 0.4_476])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def __snake_case ( self : str):
super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
"""simple docstring"""
def __snake_case ( self : Union[str, Any]):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np", )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
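# Illustrative end-to-end usage of the inpainting pipeline exercised by the tests above.
# This sketch is an addition, not part of the original test file: the checkpoint id and
# image URLs match the slow tests, but the device choice and output filename are assumptions.
#
#   import torch
#   from diffusers import StableDiffusionInpaintPipeline
#   from diffusers.utils import load_image
#
#   pipe = StableDiffusionInpaintPipeline.from_pretrained(
#       "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
#   )
#   pipe.to("cuda")
#   init_image = load_image(
#       "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png"
#   )
#   mask_image = load_image(
#       "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
#   )
#   # only the masked region is regenerated to match the prompt
#   image = pipe(
#       prompt="Face of a yellow cat, high resolution, sitting on a park bench",
#       image=init_image,
#       mask_image=mask_image,
#       generator=torch.manual_seed(0),
#   ).images[0]
#   image.save("inpainted.png")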
| 40 |
"""simple docstring"""
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
logger = logging.getLogger(__name__)
MODEL_BIN_FILE = "pytorch_model.bin"
@dataclasses.dataclass
class STModelArguments:
    """Arguments pertaining to which model we are going to fine-tune from."""

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
    cache_dir: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, )


@dataclasses.dataclass
class STDataArguments:
    """Arguments pertaining to the data used for training, evaluation and inference."""

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."} )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}, )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."} )


@dataclasses.dataclass
class STTrainingArguments:
    """Arguments controlling the self-training loop."""

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."} )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no", metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch\"]"
        }, )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        }, )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100, metadata={"help": "Maximum number of self-training iterations."}, )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}, )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir):
    """Create pseudo-labeled data for the next self-training iteration."""
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)
    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)
    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))
    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)
    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
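# A tiny, self-contained illustration of the confidence-based filtering step above,
# using the same `datasets` operations on made-up toy data (added; not part of the script):
#
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"prediction": [0, 1, 1], "probability": [0.55, 0.92, 0.97], "label": [0, 0, 0]})
#   ds = ds.filter(lambda ex: ex["probability"] > 0.9)          # keep only confident rows
#   ds = ds.remove_columns(["label", "probability"]).rename_column("prediction", "label")
#   assert ds["label"] == [1, 1]                                # predictions promoted to labels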
def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs):
    """Run the self-training loop: fine-tune, pseudo-label, and repeat until early stopping."""
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )
    logger.info(accelerator.state)
    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)
    # Sanity checks
    data_files = {}
    args.data_file_extension = None
    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file
    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file
    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."
    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."
    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)
    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)
    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()
    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)
    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)
        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})
        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.", model_bin_file_path, iteration, )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)
        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir
            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.", model_bin_file_path, iteration, )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)
        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)
        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)
        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)
        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]
        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()
        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result
            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1
                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True
        progress_bar.update(1)
        if should_training_stop:
            break
    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"), os.path.join(output_dir, "eval_results_best-iteration.json"), )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"), os.path.join(output_dir, "eval_results_best-iteration.json"), )
| 81 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class RobertaConfig(PretrainedConfig):
    """Configuration class to store the configuration of a RoBERTa model."""
    model_type = "roberta"
    def __init__( self, vocab_size=50265, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig(OnnxConfig):
    """ONNX export configuration for RoBERTa."""
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ] )
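# Illustrative usage of the configuration above via the public transformers API
# (added example; values shown are the defaults defined in __init__):
#
#   from transformers import RobertaConfig, RobertaModel
#
#   config = RobertaConfig()        # roberta-base sized: 12 layers, 768 hidden units
#   model = RobertaModel(config)    # randomly initialised model built from the config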
| 15 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy one fairseq tensor into the matching HF submodule attribute."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Walk the fairseq state dict and load every tensor into the HF model."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load a fairseq convolutional feature-extractor layer into the HF feature extractor."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Copy/paste/tweak the fairseq Hubert weights into the transformers design."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="|", do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])} )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
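# Example invocation of the converter above (added; the script filename and the paths
# are placeholders, the flags match the argparse definition):
#
#   python convert_hubert_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base-converted \
#       --not_finetuned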
| 15 | 1 |
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="np").input_ids
        labels = tokenizer("Hi I am", return_tensors="np").input_ids
        decoder_input_ids = shift_tokens_right(labels, model.config.pad_token_id, model.config.decoder_start_token_id)
        logits = model(input_ids, decoder_input_ids=decoder_input_ids).logits
        loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1E-4)
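# A toy illustration of what `shift_tokens_right` does above (made-up ids, added note):
# it prepends the decoder start token and drops the final position, turning the labels
# into teacher-forced decoder inputs.
#
#   labels            = [[5, 6, 7, 1]]    # 1 = </s>
#   decoder_input_ids = [[0, 5, 6, 7]]    # 0 = decoder_start_token_id (= pad id for T5/mT5)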
| 158 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `n` (Project Euler #1)."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(f'''{solution() = }''')
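
# Added cross-check: the same sum computed in O(1) with the arithmetic-series formula,
# using inclusion-exclusion over multiples of 3, 5 and 15.
def solution_closed_form(n: int = 1000) -> int:
    def series_sum(k: int) -> int:
        m = (n - 1) // k  # number of multiples of k below n
        return k * m * (m + 1) // 2

    return series_sum(3) + series_sum(5) - series_sum(15)


assert solution_closed_form() == solution()  # both give 233168 for n = 1000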
| 158 | 1 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class PNDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images
        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class PNDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"
        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()
        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.15_64, 0.1_46_45, 0.14_06, 0.1_47_15, 0.1_24_25, 0.1_40_45, 0.1_31_15, 0.1_21_75, 0.1_25])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
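# Minimal non-test usage of the pipeline exercised above (added sketch; checkpoint id
# matches the integration test, the output filename is an assumption):
#
#   import torch
#   from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel
#
#   unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32")
#   pipe = PNDMPipeline(unet=unet, scheduler=PNDMScheduler())
#   image = pipe(generator=torch.manual_seed(0), num_inference_steps=50).images[0]
#   image.save("pndm_sample.png")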
| 368 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False
try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")
CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """Read the Visual Genome object and attribute label lists from disk."""
    vg_classes = []
    with open(objs) as f:
        for object in f.readlines():
            vg_classes.append(object.split(",")[0].lower().strip())
    vg_attrs = []
    with open(attrs) as f:
        for object in f.readlines():
            vg_attrs.append(object.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
def load_checkpoint(ckp_path):
    """Load a pickled detectron checkpoint and convert numpy arrays to torch tensors."""
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
class Config:
    """Nested, dot-addressable configuration object loaded from YAML."""
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)
        self._pointer = d
    def __repr__(self):
        return str(list(self._pointer.keys()))
    def __setattr__(self, key, val):
        self.__dict__[key] = val
        self.__dict__[key.split(".")[-1]] = val
        levels = key.split(".")
        last_level = len(levels) - 1
        pointer = self._pointer
        if len(levels) > 1:
            for i, l in enumerate(levels):
                if hasattr(self, l) and isinstance(getattr(self, l), Config):
                    setattr(getattr(self, l), ".".join(levels[i:]), val)
                if l == last_level:
                    pointer[l] = val
                else:
                    pointer = pointer[l]
    def to_dict(self):
        return self._pointer
    def dump_yaml(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            dump(data, stream)
    def dump_json(self, data, file_name):
        with open(f"{file_name}", "w") as stream:
            json.dump(data, stream)
    @staticmethod
    def load_yaml(config):
        with open(config) as stream:
            data = load(stream, Loader=Loader)
        return data
    def __str__(self):
        t = " "
        if self._name != "root":
            r = f"{t * (self._level-1)}{self._name}:\n"
        else:
            r = ""
        level = self._level
        for i, (k, v) in enumerate(self._pointer.items()):
            if isinstance(v, Config):
                r += f"{t * (self._level)}{v}\n"
                self._level += 1
            else:
                r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n"
        self._level = level
        return r[:-1]
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs):
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        return cls(config_dict)
    @classmethod
    def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs):
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False)
        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, )
            # Load config dict
            if resolved_config_file is None:
                raise EnvironmentError
            config_file = Config.load_yaml(resolved_config_file)
        except EnvironmentError:
            msg = "Can't load config for"
            raise EnvironmentError(msg)
        if resolved_config_file == config_file:
            print("loading configuration file from path")
        else:
            print("loading configuration file cache")
        return Config.load_yaml(resolved_config_file), kwargs
def compare(in_tensor):
    """Compare a tensor against a dumped reference; the exception is used as a success sentinel."""
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    """Return True if the string parses as an http(s) URL."""
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True) -> str:
    """Build the download URL for a file in a model repo."""
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    """Stream a URL into `temp_file`, optionally resuming a partial download."""
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading", )
    for chunk in response.iter_content(chunk_size=1_024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False, ):
    """Resolve a URL through the local cache, downloading it if necessary."""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass
    filename = url_to_filename(url, etag)
    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)
    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False." )
                return None
    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path
    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path
        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name, )
            http_get(
                url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent, )
        os.replace(temp_file.name, cache_path)
        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)
    return cache_path
def url_to_filename(url, etag=None):
    """Derive a deterministic cache filename from a URL (and optional etag)."""
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()
    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()
    if url.endswith(".h5"):
        filename += ".h5"
    return filename
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False, ):
    """Return a local path for a URL or file, downloading and optionally extracting it."""
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only, )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path
        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted
        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))
        return output_path_extracted
    return output_path
def get_data(query, delim=","):
    """Fetch data from a local file or a URL, evaluating it into Python objects where possible."""
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data
def get_image_from_url(url):
    """Download an image URL into a numpy array."""
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    """Download a pickled Faster R-CNN checkpoint and convert its weights to torch tensors."""
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def img_tensorize(im, input_format="RGB"):
    """Read an image from a path or URL and return it as an array in the requested format."""
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img
def chunk(images, batch=1):
    """Yield successive batches from a list of images."""
    return (images[i : i + batch] for i in range(0, len(images), batch))
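# The cache-naming scheme implemented by `url_to_filename` above, shown on a made-up URL
# (added note): downloads land under TRANSFORMERS_CACHE next to a sidecar `<name>.json`
# holding {"url": ..., "etag": ...}.
#
#   >>> url_to_filename("https://cdn.huggingface.co/demo/model.bin", etag='"abc"')
#   '<sha256(url)>.<sha256(etag)>'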
| 310 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__( self, vocab_file=None, merges_file=None, tokenizer_file=None, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", add_prefix_space=False, clean_up_tokenization_spaces=False, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, add_prefix_space=add_prefix_space, clean_up_tokenization_spaces=clean_up_tokenization_spaces, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space
    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs." )
        return super()._batch_encode_plus(*args, **kwargs)
    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                F"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs." )
        return super()._encode_plus(*args, **kwargs)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
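# Typical usage of the fast tokenizer above via the public auto class (added sketch;
# the checkpoint id is one of those listed in the vocab map):
#
#   from transformers import AutoTokenizer
#
#   tok = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
#   tok("Hello world")["input_ids"]     # token ids; no prefix space added by default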
| 261 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
UpperCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor):
    """Deprecated alias kept for backward compatibility; use DeiTImageProcessor instead."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DeiTImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
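# Migration note (added): the shim above only emits a FutureWarning and forwards every
# argument, so new code can construct the image processor directly:
#
#   from transformers import DeiTImageProcessor
#
#   image_processor = DeiTImageProcessor()   # drop-in replacement for DeiTFeatureExtractor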
| 91 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DiTPipeline(DiffusionPipeline):
    def __init__( self, transformer: Transformer2DModel, vae: AutoencoderKL, scheduler: KarrasDiffusionSchedulers, id2label: Optional[Dict[int, str]] = None, ) -> None:
        super().__init__()
        self.register_modules(transformer=transformer, vae=vae, scheduler=scheduler)
        # create a imagenet -> id dictionary for easier use
        self.labels = {}
        if id2label is not None:
            for key, value in id2label.items():
                for label in value.split(","):
                    self.labels[label.lstrip().rstrip()] = int(key)
            self.labels = dict(sorted(self.labels.items()))
    def get_label_ids(self, label: Union[str, List[str]]) -> List[int]:
        if not isinstance(label, list):
            label = list(label)
        for l in label:
            if l not in self.labels:
                raise ValueError(
                    f"{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}." )
        return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__( self, class_labels: List[int], guidance_scale: float = 4.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels
        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size), generator=generator, device=self.device, dtype=self.transformer.dtype, )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents
        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])
            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input).sample
            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)
            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred
            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample
        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input
        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
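# Usage sketch for the class-conditional pipeline above (added; mirrors the diffusers
# documentation for the released DiT checkpoint, with device and label as assumptions):
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#   pipe.to("cuda")
#   class_ids = pipe.get_label_ids(["white shark"])
#   image = pipe(class_labels=class_ids, guidance_scale=4.0).images[0]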
| 159 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __snake_case (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : int ) -> Any:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
# fmt: off
_lowerCAmelCase : Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
_lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_lowerCAmelCase : Optional[Any] = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
_lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , _UpperCAmelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] , **_UpperCAmelCase : Any ) -> str:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] , **_UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
'''simple docstring'''
_lowerCAmelCase : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_lowerCAmelCase : Tuple = [Image.fromarray(np.moveaxis(_UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Tuple = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Optional[int] = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
'''simple docstring'''
_lowerCAmelCase : Tuple = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Optional[int] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : int = self.get_image_processor(do_normalize=_UpperCAmelCase , padding_value=1.0 )
_lowerCAmelCase : List[str] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Tuple = image_processor(_UpperCAmelCase , return_tensors="""np""" )
_lowerCAmelCase : Union[str, Any] = processor(images=_UpperCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : Dict = self.get_image_processor()
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
_lowerCAmelCase : int = """lower newer"""
_lowerCAmelCase : int = processor(text=_UpperCAmelCase )
_lowerCAmelCase : Dict = tokenizer(_UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : Any = self.get_image_processor()
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : Any = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
_lowerCAmelCase : Tuple = """lower newer"""
_lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
_lowerCAmelCase : Optional[int] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(_UpperCAmelCase ):
processor()
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.get_image_processor()
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
_lowerCAmelCase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : str = processor.batch_decode(_UpperCAmelCase )
_lowerCAmelCase : List[str] = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = self.get_image_processor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[int] = VisionTextDualEncoderProcessor(tokenizer=_UpperCAmelCase , image_processor=_UpperCAmelCase )
_lowerCAmelCase : Dict = """lower newer"""
_lowerCAmelCase : Optional[int] = self.prepare_image_inputs()
_lowerCAmelCase : Union[str, Any] = processor(text=_UpperCAmelCase , images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 159 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester :
def __init__( self: str , UpperCAmelCase_: Tuple , UpperCAmelCase_: int=13 , UpperCAmelCase_: List[Any]=7 , UpperCAmelCase_: Union[str, Any]=True , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: Dict=False , UpperCAmelCase_: Tuple=True , UpperCAmelCase_: List[Any]=99 , UpperCAmelCase_: List[Any]=64 , UpperCAmelCase_: Optional[Any]=5 , UpperCAmelCase_: Optional[int]=4 , UpperCAmelCase_: Optional[Any]=64 , UpperCAmelCase_: List[Any]="gelu" , UpperCAmelCase_: Tuple=0.1 , UpperCAmelCase_: Tuple=0.1 , UpperCAmelCase_: Optional[int]=512 , UpperCAmelCase_: Dict=16 , UpperCAmelCase_: List[Any]=2 , UpperCAmelCase_: Tuple=0.02 , UpperCAmelCase_: List[Any]=3 , UpperCAmelCase_: Optional[Any]=4 , UpperCAmelCase_: str=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_input_mask
_SCREAMING_SNAKE_CASE = use_token_type_ids
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = type_vocab_size
_SCREAMING_SNAKE_CASE = type_sequence_label_size
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = num_labels
_SCREAMING_SNAKE_CASE = num_choices
_SCREAMING_SNAKE_CASE = scope
def UpperCamelCase ( self: str ):
'''simple docstring'''
return MPNetConfig.from_pretrained("""microsoft/mpnet-base""" )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
_SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices )
_SCREAMING_SNAKE_CASE = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase ( self: str ):
'''simple docstring'''
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Tuple ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MPNetModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase ( self: Optional[int] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MPNetForQuestionAnswering(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase ( self: Union[str, Any] , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = MPNetForSequenceClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Optional[Any] , UpperCAmelCase_: str , UpperCAmelCase_: Dict , UpperCAmelCase_: str , UpperCAmelCase_: str , UpperCAmelCase_: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_choices
_SCREAMING_SNAKE_CASE = MPNetForMultipleChoice(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_SCREAMING_SNAKE_CASE = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase ( self: List[Any] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: List[str] , UpperCAmelCase_: List[Any] , UpperCAmelCase_: Any , UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.num_labels
_SCREAMING_SNAKE_CASE = MPNetForTokenClassification(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
((_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE) , (_SCREAMING_SNAKE_CASE)) = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class MPNetModelTest (ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
__snake_case : str = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
__snake_case : Dict = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
__snake_case : str = False
__snake_case : Tuple = True
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MPNetModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=37 )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*UpperCAmelCase_ )
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*UpperCAmelCase_ )
def UpperCamelCase ( self: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*UpperCAmelCase_ )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*UpperCAmelCase_ )
@require_torch
class MPNetModelIntegrationTest (unittest.TestCase ):
@slow
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = MPNetModel.from_pretrained("""microsoft/mpnet-base""" )
_SCREAMING_SNAKE_CASE = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = torch.tensor(
[[[-0.05_50, 0.19_43, -0.07_40], [-0.05_62, 0.22_11, -0.05_79], [-0.04_37, 0.33_37, -0.06_41]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCAmelCase_ , atol=1E-4 ) )
| 306 |
import random
def rabin_miller( num: int ) -> bool:
"""simple docstring"""
s = num - 1
t = 0
while s % 2 == 0:
s = s // 2
t += 1
for _ in range(5 ):
a = random.randrange(2 , num - 1 )
v = pow(a , s , num )
if v != 1:
i = 0
while v != (num - 1):
if i == t - 1:
return False
else:
i = i + 1
v = (v**2) % num
return True
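# Worked example of the decomposition the loop above relies on (a sketch):
# for num = 561 (a Carmichael number, 561 = 3 * 11 * 17) we get
#     num - 1 = 560 = 2**4 * 35, so s = 35 and t = 4.
# With base a = 2: pow(2, 35, 561) = 263, and repeated squaring yields
# 263 -> 166 -> 67 -> 1, never reaching 560 (== num - 1), so 561 is rejected
# even though it fools the plain Fermat test.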
def is_prime_low_num( num: int ) -> bool:
"""simple docstring"""
if num < 2:
return False
low_primes = [
2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151,
157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233,
239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,
331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419,
421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,
509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607,
613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,
709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811,
821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911,
919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(num )
def generate_large_prime( keysize: int = 1024 ) -> int:
"""simple docstring"""
while True:
num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(num ):
return num
if __name__ == "__main__":
num = generate_large_prime()
print("Prime number:", num)
print("is_prime_low_num:", is_prime_low_num(num))
| 306 | 1 |
"""simple docstring"""
def interpolation_search( sorted_collection: list , item: int ):
"""simple docstring"""
left = 0
right = len(sorted_collection ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
point = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(sorted_collection ):
return None
current_item = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
right = left
left = point
elif point > right:
left = right
right = point
else:
if item < current_item:
right = point - 1
else:
left = point + 1
return None
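# Worked example of the probe formula above (a sketch): for the sorted list
# [10, 30, 40, 45, 50, 66, 77, 93] and item = 66, the first probe is
#     point = 0 + (66 - 10) * (7 - 0) // (93 - 10) = 392 // 83 = 4,
# which lands on 50 (index 4), just left of the target; the window then
# narrows to [5, 7] and the next probe hits 66 at index 5.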
def interpolation_search_by_recursion( sorted_collection: list , item: int , left: int , right: int ):
"""simple docstring"""
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
point = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(sorted_collection ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(sorted_collection , item , point , left )
elif point > right:
return interpolation_search_by_recursion(sorted_collection , item , right , point )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , point - 1 )
else:
return interpolation_search_by_recursion(
__UpperCamelCase , __UpperCamelCase , point + 1 , __UpperCamelCase )
def __assert_sorted( collection: list ):
"""simple docstring"""
if collection != sorted(collection ):
raise ValueError("Collection must be ascending sorted" )
return True
if __name__ == "__main__":
import sys
debug = 0
if debug == 1:
collection = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
target = 67
result = interpolation_search(collection, target)
if result is not None:
print(F"""{target} found at positions: {result}""")
else:
print("""Not found""")
| 358 |
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction( train_dt: list , train_usr: list , train_mtch: list , test_dt: list , test_mtch: list ) -> float:
"""simple docstring"""
x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt )] )
y = np.array(train_usr )
beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , x ) ) , x.transpose() ) , y )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )
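# A hedged note on the closed form above: it is the ordinary least-squares
# normal equation, beta = (X^T X)^(-1) X^T y, for rows x_i = [1, date_i, match_i];
# the prediction for a new row is then beta[0] + beta[1] * date + beta[2] * match.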
def sarimax_predictor( train_user: list , train_match: list , test_match: list ) -> float:
"""simple docstring"""
order = (1, 2, 1)
seasonal_order = (1, 1, 0, 7)
model = SARIMAX(
train_user , exog=train_match , order=order , seasonal_order=seasonal_order )
model_fit = model.fit(disp=False , maxiter=600 , method="nm" )
result = model_fit.predict(1 , len(test_match ) , exog=[test_match] )
return result[0]
def support_vector_regressor( x_train: list , x_test: list , train_user: list ) -> float:
"""simple docstring"""
regressor = SVR(kernel="rbf" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(x_train , train_user )
y_pred = regressor.predict(x_test )
return y_pred[0]
def interquartile_range_checker( train_user: list ) -> float:
"""simple docstring"""
train_user.sort()
q1 = np.percentile(train_user , 25 )
q3 = np.percentile(train_user , 75 )
iqr = q3 - q1
low_lim = q1 - (iqr * 0.1)
return low_lim
def data_safety_checker( list_vote: list , actual_result: float ) -> bool:
"""simple docstring"""
safe = 0
not_safe = 0
for i in list_vote:
if i > actual_result:
not_safe = not_safe + 1
else:
if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
data_input_df = pd.DataFrame(
data_input, columns=["""total_user""", """total_even""", """days"""]
)
normalize_df = Normalizer().fit_transform(data_input_df.values)
# split data
total_date = normalize_df[:, 2].tolist()
total_user = normalize_df[:, 0].tolist()
total_match = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
x = normalize_df[:, [1, 2]].tolist()
x_train = x[: len(x) - 1]
x_test = x[len(x) - 1 :]
# for linear regression & sarimax
trn_date = total_date[: len(total_date) - 1]
trn_user = total_user[: len(total_user) - 1]
trn_match = total_match[: len(total_match) - 1]
tst_date = total_date[len(total_date) - 1 :]
tst_user = total_user[len(total_user) - 1 :]
tst_match = total_match[len(total_match) - 1 :]
# voting system with forecasting
res_vote = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
not_str = """""" if data_safety_checker(res_vote, tst_user) else """not """
print("""Today's data is {not_str}safe.""")
| 161 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE :str = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE :Tuple = {
'roberta-base': 'https://huggingface.co/roberta-base/resolve/main/config.json',
'roberta-large': 'https://huggingface.co/roberta-large/resolve/main/config.json',
'roberta-large-mnli': 'https://huggingface.co/roberta-large-mnli/resolve/main/config.json',
'distilroberta-base': 'https://huggingface.co/distilroberta-base/resolve/main/config.json',
'roberta-base-openai-detector': 'https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json',
'roberta-large-openai-detector': 'https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json',
}
class RobertaConfig( PretrainedConfig ):
'''simple docstring'''
model_type = "roberta"
def __init__( self , vocab_size=50265 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs , ):
super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.position_embedding_type = position_embedding_type
self.use_cache = use_cache
self.classifier_dropout = classifier_dropout
class RobertaOnnxConfig( OnnxConfig ):
'''simple docstring'''
@property
def inputs( self ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
else:
dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 15 |
import PIL.Image
import PIL.ImageOps
from packaging import version
from PIL import Image
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('9.1.0'):
SCREAMING_SNAKE_CASE :Any = {
'linear': PIL.Image.Resampling.BILINEAR,
'bilinear': PIL.Image.Resampling.BILINEAR,
'bicubic': PIL.Image.Resampling.BICUBIC,
'lanczos': PIL.Image.Resampling.LANCZOS,
'nearest': PIL.Image.Resampling.NEAREST,
}
else:
SCREAMING_SNAKE_CASE :int = {
'linear': PIL.Image.LINEAR,
'bilinear': PIL.Image.BILINEAR,
'bicubic': PIL.Image.BICUBIC,
'lanczos': PIL.Image.LANCZOS,
'nearest': PIL.Image.NEAREST,
}
def pt_to_pil( images ):
"""simple docstring"""
images = (images / 2 + 0.5).clamp(0 , 1 )
images = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
images = numpy_to_pil(images )
return images
def numpy_to_pil( images ):
"""simple docstring"""
if images.ndim == 3:
images = images[None, ...]
images = (images * 255).round().astype("uint8" )
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
pil_images = [Image.fromarray(image.squeeze() , mode="L" ) for image in images]
else:
pil_images = [Image.fromarray(image ) for image in images]
return pil_images
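# Hedged round-trip sketch for the helpers above (the array shape and values
# are assumptions): float images in [0, 1] come back as a list of PIL images.
import numpy as np
_demo_batch = np.random.rand(2 , 8 , 8 , 3 )  # NHWC floats in [0, 1]
_demo_pils = numpy_to_pil(_demo_batch )
assert len(_demo_pils ) == 2 and _demo_pils[0].size == (8, 8)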
| 15 | 1 |
"""simple docstring"""
import unittest
from transformers import BertGenerationTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowercase__ = """โ"""
lowercase__ = get_tests_dir("""fixtures/test_sentencepiece.model""")
@require_sentencepiece
class __lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
'''simple docstring'''
tokenizer_class = BertGenerationTokenizer
test_rust_tokenizer = False
test_sentencepiece = True
def lowerCamelCase ( self : Any ):
super().setUp()
lowerCAmelCase_ : Optional[Any] = BertGenerationTokenizer(__lowercase , keep_accents=__lowercase )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : Dict = '''<s>'''
lowerCAmelCase_ : Dict = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowercase ) , __lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowercase ) , __lowercase )
def lowerCamelCase ( self : Any ):
lowerCAmelCase_ : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "<pad>" )
self.assertEqual(len(__lowercase ) , 10_02 )
def lowerCamelCase ( self : Tuple ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_00 )
def lowerCamelCase ( self : List[Any] ):
lowerCAmelCase_ : str = BertGenerationTokenizer(__lowercase , keep_accents=__lowercase )
lowerCAmelCase_ : Optional[Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__lowercase , ["โThis", "โis", "โa", "โt", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowercase ) , [2_85, 46, 10, 1_70, 3_82] , )
lowerCAmelCase_ : List[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"รฉ",
".",
] , )
lowerCAmelCase_ : Any = tokenizer.convert_tokens_to_ids(__lowercase )
self.assertListEqual(
__lowercase , [8, 21, 84, 55, 24, 19, 7, 0, 6_02, 3_47, 3_47, 3_47, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCAmelCase_ : Tuple = tokenizer.convert_ids_to_tokens(__lowercase )
self.assertListEqual(
__lowercase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def lowerCamelCase ( self : List[Any] ):
return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder" )
@slow
def lowerCamelCase ( self : int ):
lowerCAmelCase_ : Optional[Any] = '''Hello World!'''
lowerCAmelCase_ : Optional[int] = [1_85_36, 22_60, 1_01]
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) )
@slow
def lowerCamelCase ( self : List[str] ):
lowerCAmelCase_ : Optional[Any] = (
'''This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'''
''' add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth'''
)
lowerCAmelCase_ : Union[str, Any] = [
8_71,
4_19,
3_58,
9_46,
9_91,
25_21,
4_52,
3_58,
13_57,
3_87,
77_51,
35_36,
1_12,
9_85,
4_56,
1_26,
8_65,
9_38,
54_00,
57_34,
4_58,
13_68,
4_67,
7_86,
24_62,
52_46,
11_59,
6_33,
8_65,
45_19,
4_57,
5_82,
8_52,
25_57,
4_27,
9_16,
5_08,
4_05,
3_43_24,
4_97,
3_91,
4_08,
1_13_42,
12_44,
3_85,
1_00,
9_38,
9_85,
4_56,
5_74,
3_62,
1_25_97,
32_00,
31_29,
11_72,
]
self.assertListEqual(__lowercase , self.big_tokenizer.encode(__lowercase ) )
@require_torch
@slow
def lowerCamelCase ( self : Any ):
import torch
from transformers import BertGenerationConfig, BertGenerationEncoder
# Build sequence
lowerCAmelCase_ : Optional[Any] = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCAmelCase_ : Optional[int] = ''' '''.join(__lowercase )
lowerCAmelCase_ : Optional[int] = self.big_tokenizer.encode_plus(__lowercase , return_tensors="pt" , return_token_type_ids=__lowercase )
lowerCAmelCase_ : List[str] = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__lowercase )
lowerCAmelCase_ : List[Any] = BertGenerationConfig()
lowerCAmelCase_ : Optional[Any] = BertGenerationEncoder(__lowercase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__lowercase )
model(**__lowercase )
@slow
def lowerCamelCase ( self : int ):
# fmt: off
lowerCAmelCase_ : List[str] = {'''input_ids''': [[3_92_86, 4_58, 3_63_35, 20_01, 4_56, 1_30_73, 1_32_66, 4_55, 1_13, 77_46, 17_41, 1_11_57, 3_91, 1_30_73, 1_32_66, 4_55, 1_13, 39_67, 3_54_12, 1_13, 49_36, 1_09, 38_70, 23_77, 1_13, 3_00_84, 4_57_20, 4_58, 1_34, 1_74_96, 1_12, 5_03, 1_16_72, 1_13, 1_18, 1_12, 56_65, 1_33_47, 3_86_87, 1_12, 14_96, 3_13_89, 1_12, 32_68, 4_72_64, 1_34, 9_62, 1_12, 1_63_77, 80_35, 2_31_30, 4_30, 1_21_69, 1_55_18, 2_85_92, 4_58, 1_46, 4_16_97, 1_09, 3_91, 1_21_69, 1_55_18, 1_66_89, 4_58, 1_46, 4_13_58, 1_09, 4_52, 7_26, 40_34, 1_11, 7_63, 3_54_12, 50_82, 3_88, 19_03, 1_11, 90_51, 3_91, 28_70, 4_89_18, 19_00, 11_23, 5_50, 9_98, 1_12, 95_86, 1_59_85, 4_55, 3_91, 4_10, 2_29_55, 3_76_36, 1_14], [4_48, 1_74_96, 4_19, 36_63, 3_85, 7_63, 1_13, 2_75_33, 28_70, 32_83, 1_30_43, 16_39, 2_47_13, 5_23, 6_56, 2_40_13, 1_85_50, 25_21, 5_17, 2_70_14, 2_12_44, 4_20, 12_12, 14_65, 3_91, 9_27, 48_33, 3_88, 5_78, 1_17_86, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_84, 21_69, 76_87, 2_19_32, 1_81_46, 7_26, 3_63, 1_70_32, 33_91, 1_14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__lowercase , model_name="google/bert_for_seq_generation_L-24_bbc_encoder" , revision="c817d1fd1be2ffa69431227a1fe320544943d4db" , )
| 367 |
"""simple docstring"""
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray( rgb: np.ndarray ) -> np.ndarray:
"""simple docstring"""
r , g , b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
def gray_to_binary( gray: np.ndarray ) -> np.ndarray:
"""simple docstring"""
return (gray > 127) & (gray <= 255)
def dilation( image: np.ndarray , kernel: np.ndarray ) -> np.ndarray:
"""simple docstring"""
output = np.zeros_like(image )
image_padded = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) )
# Copy image to padded image
image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
# Iterate over image & apply kernel
for x in range(image.shape[1] ):
for y in range(image.shape[0] ):
summation = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
output[y, x] = int(summation > 0 )
return output
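# A hedged toy check (the input values are assumptions): a single on-pixel
# grows into a plus shape under the cross structuring element used in
# __main__ below.
assert dilation(
np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]] ) ,
np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]] ) ,
).tolist() == [[0, 1, 0], [1, 1, 1], [0, 1, 0]]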
if __name__ == "__main__":
# read original image
lena_path = Path(__file__).resolve().parent / """image_data""" / """lena.jpg"""
lena = np.array(Image.open(lena_path))
# kernel to be applied
structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
pil_img = Image.fromarray(output).convert("""RGB""")
pil_img.save("""result_dilation.png""")
| 161 | 0 |
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor( ProcessorMixin ):
attributes = ["""image_processor"""]
image_processor_class = """SamImageProcessor"""
def __init__( self , image_processor ):
'''simple docstring'''
super().__init__(image_processor )
self.current_processor = self.image_processor
self.point_pad_value = -10
self.target_size = self.image_processor.size["longest_edge"]
def __call__( self : List[Any] , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Any=None , UpperCamelCase__ : str=None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , **UpperCamelCase__ : Tuple , )-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: Any = self.image_processor(
A_ , return_tensors=A_ , **A_ , )
# pop arguments that are not used in the foward but used nevertheless
__lowerCAmelCase: Tuple = encoding_image_processor["original_sizes"]
if hasattr(A_ , "numpy"): # Checks if Torch or TF tensor
__lowerCAmelCase: Optional[Any] = original_sizes.numpy()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase: List[Any] = self._check_and_preprocess_points(
input_points=A_ , input_labels=A_ , input_boxes=A_ , )
__lowerCAmelCase: Tuple = self._normalize_and_convert(
A_ , A_ , input_points=A_ , input_labels=A_ , input_boxes=A_ , return_tensors=A_ , )
return encoding_image_processor
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[str]="pt" , )-> List[str]:
'''simple docstring'''
if input_points is not None:
if len(A_) != len(A_):
__lowerCAmelCase: Optional[int] = [
self._normalize_coordinates(self.target_size , A_ , original_sizes[0]) for point in input_points
]
else:
__lowerCAmelCase: int = [
self._normalize_coordinates(self.target_size , A_ , A_)
for point, original_size in zip(A_ , A_)
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points):
if input_labels is not None:
__lowerCAmelCase , __lowerCAmelCase: str = self._pad_points_and_labels(A_ , A_)
__lowerCAmelCase: Any = np.array(A_)
if input_labels is not None:
__lowerCAmelCase: List[str] = np.array(A_)
if input_boxes is not None:
if len(A_) != len(A_):
__lowerCAmelCase: Dict = [
self._normalize_coordinates(self.target_size , A_ , original_sizes[0] , is_bounding_box=A_)
for box in input_boxes
]
else:
__lowerCAmelCase: int = [
self._normalize_coordinates(self.target_size , A_ , A_ , is_bounding_box=A_)
for box, original_size in zip(A_ , A_)
]
__lowerCAmelCase: Optional[int] = np.array(A_)
if input_boxes is not None:
if return_tensors == "pt":
__lowerCAmelCase: Dict = torch.from_numpy(A_)
# boxes batch size of 1 by default
__lowerCAmelCase: Optional[int] = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
elif return_tensors == "tf":
__lowerCAmelCase: List[str] = tf.convert_to_tensor(A_)
# boxes batch size of 1 by default
__lowerCAmelCase: Any = tf.expand_dims(A_ , 1) if len(input_boxes.shape) != 3 else input_boxes
encoding_image_processor.update({"input_boxes": input_boxes})
if input_points is not None:
if return_tensors == "pt":
__lowerCAmelCase: Union[str, Any] = torch.from_numpy(A_)
# point batch size of 1 by default
__lowerCAmelCase: List[Any] = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
elif return_tensors == "tf":
__lowerCAmelCase: List[str] = tf.convert_to_tensor(A_)
# point batch size of 1 by default
__lowerCAmelCase: Dict = tf.expand_dims(A_ , 1) if len(input_points.shape) != 4 else input_points
encoding_image_processor.update({"input_points": input_points})
if input_labels is not None:
if return_tensors == "pt":
__lowerCAmelCase: Optional[int] = torch.from_numpy(A_)
# point batch size of 1 by default
__lowerCAmelCase: List[Any] = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
elif return_tensors == "tf":
__lowerCAmelCase: Dict = tf.convert_to_tensor(A_)
# point batch size of 1 by default
__lowerCAmelCase: Optional[Any] = tf.expand_dims(A_ , 1) if len(input_labels.shape) != 3 else input_labels
encoding_image_processor.update({"input_labels": input_labels})
return encoding_image_processor
def lowercase_ ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int)-> Union[str, Any]:
'''simple docstring'''
__lowerCAmelCase: str = max([point.shape[0] for point in input_points])
__lowerCAmelCase: Tuple = []
for i, point in enumerate(A_):
if point.shape[0] != expected_nb_points:
__lowerCAmelCase: Any = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0)
__lowerCAmelCase: List[str] = np.append(input_labels[i] , [self.point_pad_value])
processed_input_points.append(A_)
__lowerCAmelCase: str = processed_input_points
return input_points, input_labels
def _normalize_coordinates( self , target_size: int , coords: np.ndarray , original_size , is_bounding_box=False )-> np.ndarray:
'''simple docstring'''
old_h , old_w = original_size
new_h , new_w = self.image_processor._get_preprocess_shape(original_size , longest_edge=target_size )
coords = deepcopy(coords ).astype(float )
if is_bounding_box:
coords = coords.reshape(-1 , 2 , 2 )
coords[..., 0] = coords[..., 0] * (new_w / old_w)
coords[..., 1] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
coords = coords.reshape(-1 , 4 )
return coords
def lowercase_ ( self : int , UpperCamelCase__ : Dict=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[str]=None , )-> Optional[int]:
'''simple docstring'''
if input_points is not None:
if hasattr(A_ , "numpy"): # Checks for TF or Torch tensor
__lowerCAmelCase: List[str] = input_points.numpy().tolist()
if not isinstance(A_ , A_) or not isinstance(input_points[0] , A_):
raise ValueError("Input points must be a list of list of floating points.")
__lowerCAmelCase: Tuple = [np.array(A_) for input_point in input_points]
else:
__lowerCAmelCase: Dict = None
if input_labels is not None:
if hasattr(A_ , "numpy"):
__lowerCAmelCase: Any = input_labels.numpy().tolist()
if not isinstance(A_ , A_) or not isinstance(input_labels[0] , A_):
raise ValueError("Input labels must be a list of list integers.")
__lowerCAmelCase: int = [np.array(A_) for label in input_labels]
else:
__lowerCAmelCase: List[Any] = None
if input_boxes is not None:
if hasattr(A_ , "numpy"):
__lowerCAmelCase: Optional[Any] = input_boxes.numpy().tolist()
if (
not isinstance(A_ , A_)
or not isinstance(input_boxes[0] , A_)
or not isinstance(input_boxes[0][0] , A_)
):
raise ValueError("Input boxes must be a list of list of list of floating points.")
input_boxes = [np.array(box ).astype(np.float32 ) for box in input_boxes]
else:
__lowerCAmelCase: Dict = None
return input_points, input_labels, input_boxes
@property
def lowercase_ ( self : Tuple)-> int:
'''simple docstring'''
__lowerCAmelCase: Dict = self.image_processor.model_input_names
return list(dict.fromkeys(A_))
def lowercase_ ( self : List[Any] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[str])-> List[str]:
'''simple docstring'''
return self.image_processor.post_process_masks(*A_ , **A_)
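# Hedged worked example of the rescaling in _normalize_coordinates above
# (the sizes are assumptions): with an original (H, W) = (600, 800) image and
# the longest edge resized to 1024, the new size is (768, 1024), so a point
# (x=400, y=300) maps to (400 * 1024 / 800, 300 * 768 / 600) = (512, 384).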
| 217 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig (XLMRobertaConfig ):
model_type = """M-CLIP"""
def __init__( self ,transformerDimSize=1024 ,imageDimSize=768 ,**kwargs ):
'''simple docstring'''
self.transformerDimensions = transformerDimSize
self.numDims = imageDimSize
super().__init__(**kwargs )
class MultilingualCLIP (PreTrainedModel ):
config_class = MCLIPConfig
def __init__( self ,config ,*args ,**kwargs ):
'''simple docstring'''
super().__init__(config ,*args ,**kwargs )
self.transformer = XLMRobertaModel(config )
self.LinearTransformation = torch.nn.Linear(
in_features=config.transformerDimensions ,out_features=config.numDims )
def forward( self ,input_ids ,attention_mask ):
'''simple docstring'''
embs = self.transformer(input_ids=input_ids ,attention_mask=attention_mask )[0]
embs2 = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(embs2 ), embs
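# Hedged toy illustration of the masked mean pooling in forward() above
# (the tensor values are assumptions): padding positions are zeroed before
# the sum, so the mean runs over real tokens only.
_embs = torch.tensor([[[2.0], [4.0], [6.0]]] )  # (batch=1, seq=3, dim=1)
_mask = torch.tensor([[1, 1, 0]] )  # last position is padding
_pooled = (_embs * _mask.unsqueeze(2 )).sum(dim=1 ) / _mask.sum(dim=1 )[:, None]
assert _pooled.item() == 3.0  # mean of 2.0 and 4.0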
| 310 | 0 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class _UpperCAmelCase ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
tokenizer_class = BarthezTokenizer
rust_tokenizer_class = BarthezTokenizerFast
test_rust_tokenizer = True
test_sentencepiece = True
def lowerCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
_A = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez" )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=__UpperCAmelCase )
_A = tokenizer
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
_A = "<pad>"
_A = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCAmelCase ) , __UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCAmelCase ) , __UpperCAmelCase )
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
_A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(__UpperCAmelCase ) , 101122 )
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
_A = ["A long paragraph for summarization.", "Another paragraph for summarization."]
_A = [0, 57, 3018, 70307, 91, 2]
_A = self.tokenizer(
__UpperCAmelCase , max_length=len(__UpperCAmelCase ) , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , return_tensors="pt" )
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_A = batch.input_ids.tolist()[0]
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_A = self.get_tokenizer()
_A = self.get_rust_tokenizer()
_A = "I was born in 92000, and this is falsรฉ."
_A = tokenizer.tokenize(__UpperCAmelCase )
_A = rust_tokenizer.tokenize(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
_A = rust_tokenizer.encode(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
_A = self.get_rust_tokenizer()
_A = tokenizer.encode(__UpperCAmelCase )
_A = rust_tokenizer.encode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
@slow
def lowerCAmelCase ( self : Optional[int] ):
'''simple docstring'''
_A = {"input_ids": [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a French model. So we also use French texts.
_A = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=__UpperCAmelCase , model_name="moussaKam/mbarthez" , revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6" , sequences=__UpperCAmelCase , )
| 354 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance( resistance: float , reactance: float , impedance: float ) -> dict[str, float]:
'''simple docstring'''
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(__lowercase , 2 ) - pow(__lowercase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(__lowercase , 2 ) - pow(__lowercase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(__lowercase , 2 ) + pow(__lowercase , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 174 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
SCREAMING_SNAKE_CASE :Union[str, Any] = logging.get_logger(__name__)
@dataclass
class GlueDataTrainingArguments:
"""simple docstring"""
task_name: str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
data_dir: str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
max_seq_length: int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
overwrite_cache: bool = field(
default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def __post_init__( self ) -> None:
"""simple docstring"""
self.task_name = self.task_name.lower()
class Split( Enum ):
"""simple docstring"""
train = 'train'
dev = 'dev'
test = 'test'
class GlueDataset( Dataset ):
"""simple docstring"""
args: GlueDataTrainingArguments
output_mode: str
features: List[InputFeatures]
def __init__( self , args: GlueDataTrainingArguments , tokenizer: PreTrainedTokenizerBase , limit_length: Optional[int] = None , mode: Union[str, Split] = Split.train , cache_dir: Optional[str] = None , ) -> None:
"""simple docstring"""
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , FutureWarning , )
self.args = args
self.processor = glue_processors[args.task_name]()
self.output_mode = glue_output_modes[args.task_name]
if isinstance(mode , str ):
try:
mode = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
cached_features_file = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , F'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
label_list = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
label_list[1] , label_list[2] = label_list[2], label_list[1]
self.label_list = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
lock_path = cached_features_file + ".lock"
with FileLock(lock_path ):
if os.path.exists(cached_features_file ) and not args.overwrite_cache:
start = time.time()
self.features = torch.load(cached_features_file )
logger.info(
F'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(F'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
examples = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
examples = self.processor.get_test_examples(args.data_dir )
else:
examples = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
examples = examples[:limit_length]
self.features = glue_convert_examples_to_features(
examples , tokenizer , max_length=args.max_seq_length , label_list=label_list , output_mode=self.output_mode , )
start = time.time()
torch.save(self.features , cached_features_file )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
F'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self ) -> int:
"""simple docstring"""
return len(self.features )
def __getitem__( self , i ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def get_labels( self ):
"""simple docstring"""
return self.label_list
| 159 |
from math import pi
def arc_length( angle: int , radius: int ) -> float:
'''simple docstring'''
return 2 * pi * radius * (angle / 360)
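# Hedged check of the formula above: a 90-degree angle on a circle of radius
# 10 spans a quarter of the circumference, 2 * pi * 10 / 4 = 5 * pi ≈ 15.70796.
assert abs(arc_length(90, 10) - 5 * pi) < 1e-9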
if __name__ == "__main__":
print(arc_length(90, 10))
| 159 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"""asapp/sew-tiny-100k""": """https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json""",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig( PretrainedConfig ):
model_type = """sew"""
def __init__( self : Optional[int] , lowerCamelCase_ : List[Any]=32 , lowerCamelCase_ : List[str]=768 , lowerCamelCase_ : str=12 , lowerCamelCase_ : List[Any]=12 , lowerCamelCase_ : Any=3072 , lowerCamelCase_ : Union[str, Any]=2 , lowerCamelCase_ : Optional[Any]="gelu" , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : List[Any]=0.1 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : int=0.0 , lowerCamelCase_ : Any=0.1 , lowerCamelCase_ : Optional[Any]=0.1 , lowerCamelCase_ : str=0.0_2 , lowerCamelCase_ : List[str]=1E-5 , lowerCamelCase_ : Union[str, Any]="group" , lowerCamelCase_ : str="gelu" , lowerCamelCase_ : str=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , lowerCamelCase_ : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase_ : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase_ : str=False , lowerCamelCase_ : str=128 , lowerCamelCase_ : List[str]=16 , lowerCamelCase_ : Any=True , lowerCamelCase_ : Tuple=0.0_5 , lowerCamelCase_ : int=10 , lowerCamelCase_ : List[str]=2 , lowerCamelCase_ : Tuple=0.0 , lowerCamelCase_ : Optional[Any]=10 , lowerCamelCase_ : Any=0 , lowerCamelCase_ : List[str]="mean" , lowerCamelCase_ : Optional[int]=False , lowerCamelCase_ : Union[str, Any]=False , lowerCamelCase_ : List[str]=256 , lowerCamelCase_ : Dict=0 , lowerCamelCase_ : Tuple=1 , lowerCamelCase_ : Tuple=2 , **lowerCamelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ , pad_token_id=lowerCamelCase_ , bos_token_id=lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
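# Minimal usage sketch (not part of the original file): with the default
# conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) the feature extractor
# downsamples the waveform by 5 * 2**6 = 320, so one logit frame covers 320
# raw audio samples.
if __name__ == "__main__":
    config = SEWConfig()
    assert config.inputs_to_logits_ratio == 320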
| 165 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """A drop-in `tqdm` wrapper that only renders a bar on the main process by default."""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # Disable the bar on every process except local rank 0.
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
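# Usage sketch (an assumption about how this wrapper is called, not part of the
# original file): because `main_process_only` is the first positional
# parameter, the iterable is passed after it. On a multi-process run only local
# rank 0 renders the bar; the other ranks iterate silently.
#
#     from accelerate.utils import tqdm
#
#     for batch in tqdm(True, range(1000), desc="training"):
#         ...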
| 165 | 1 |
"""simple docstring"""
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('fixtures/test_sentencepiece.model')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ["<s>", "<pad>", "</s>", "<unk>"]
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES["vocab_file"])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES["spm_file"])

        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1001)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1001)
    def test_full_tokenizer(self):
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [289, 50, 14, 174, 386],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
@slow
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Any = {"""input_ids""": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name="""facebook/s2t-small-mustc-en-de-st""" , revision="""a14f04cf0776c02f62a8cb800cf7909e15ea23ad""" , )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = 'valhalla/s2t_mustc_multilinguial_medium'

    french_text = 'C\'est trop cool'
    spanish_text = 'Esto es genial'

    @classmethod
    def setUpClass(cls):
        cls.tokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
        return cls
    def check_language_codes(self):
self.assertEqual(self.tokenizer.lang_code_to_id["""pt"""] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["""ru"""] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["""it"""] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["""de"""] , 11 )
    def test_vocab_size(self):
self.assertEqual(self.tokenizer.vocab_size , 1_0000 )
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self):
        self.tokenizer.tgt_lang = "fr"
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self):
        self.tokenizer.tgt_lang = "fr"
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])

        self.tokenizer.tgt_lang = "es"
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 84 |
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy(x):
    """Calculate entropy of a pre-softmax logit Tensor"""
    exp_x = torch.exp(x)
    A = torch.sum(exp_x, dim=1)  # sum of exp(x_i)
    B = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(A) - B / A
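# Numerical check (a sketch, not part of the original file): the closed form
# above, log(sum(exp(x))) - sum(x * exp(x)) / sum(exp(x)), equals the Shannon
# entropy of softmax(x), i.e. H = -sum(p * log(p)) with p = softmax(x).
if __name__ == "__main__":
    _x = torch.randn(4, 7)
    _p = torch.softmax(_x, dim=1)
    _shannon = -(_p * _p.log()).sum(dim=1)
    assert torch.allclose(entropy(_x), _shannon, atol=1e-5)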
class DeeBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)])
        self.highway = nn.ModuleList([BertHighway(config) for _ in range(config.num_hidden_layers)])
        self.early_exit_entropy = [-1 for _ in range(config.num_hidden_layers)]

    def set_early_exit_entropy(self, x):
        if (type(x) is float) or (type(x) is int):
            for i in range(len(self.early_exit_entropy)):
                self.early_exit_entropy[i] = x
        else:
            self.early_exit_entropy = x

    def init_highway_pooler(self, pooler):
        loaded_model = pooler.state_dict()
        for highway in self.highway:
            for name, param in highway.pooler.state_dict().items():
                param.copy_(loaded_model[name])

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        all_hidden_states = ()
        all_attentions = ()
        all_highway_exits = ()
        for i, layer_module in enumerate(self.layer):
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask
            )
            hidden_states = layer_outputs[0]

            if self.output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

            current_outputs = (hidden_states,)
            if self.output_hidden_states:
                current_outputs = current_outputs + (all_hidden_states,)
            if self.output_attentions:
                current_outputs = current_outputs + (all_attentions,)

            highway_exit = self.highway[i](current_outputs)
            # logits, pooled_output

            if not self.training:
                highway_logits = highway_exit[0]
                highway_entropy = entropy(highway_logits)
                highway_exit = highway_exit + (highway_entropy,)  # logits, hidden_states(?), entropy
                all_highway_exits = all_highway_exits + (highway_exit,)

                if highway_entropy < self.early_exit_entropy[i]:
                    new_output = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
                    raise HighwayException(new_output, i + 1)
            else:
                all_highway_exits = all_highway_exits + (highway_exit,)

        # Add last layer
        if self.output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        outputs = outputs + (all_highway_exits,)
        return outputs  # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    'The Bert Model transformer with early exiting (DeeBERT). ',
    BERT_START_DOCSTRING,
)
class DeeBertModel(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertEmbeddings(config)
        self.encoder = DeeBertEncoder(config)
        self.pooler = BertPooler(config)

        self.init_weights()

    def init_highway_pooler(self):
        self.encoder.init_highway_pooler(self.pooler)

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time')
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError('You have to specify either input_ids or inputs_embeds')

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if encoder_attention_mask is None:
            encoder_attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(
            dtype=next(self.parameters()).dtype
        )  # fp16 compatibility
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10_000.0

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (
            sequence_output,
            pooled_output,
        ) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException(Exception):
    def __init__(self, message, exit_layer):
        self.message = message
        self.exit_layer = exit_layer  # start from 1!


class BertHighway(nn.Module):
    """A module to provide a shortcut from (the output of one non-final BertLayer in BertEncoder) to
    (cross-entropy computation in BertForSequenceClassification).
    """

    def __init__(self, config):
        super().__init__()
        self.pooler = BertPooler(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

    def forward(self, encoder_outputs):
        # Pooler
        pooler_input = encoder_outputs[0]
        pooler_output = self.pooler(pooler_input)
        # "return" pooler_output

        # BertModel
        bmodel_output = (pooler_input, pooler_output) + encoder_outputs[1:]
        # "return" bmodel_output

        # Dropout and classification
        pooled_output = bmodel_output[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        return logits, pooled_output
@add_start_docstrings(
    'Bert Model (with early exiting - DeeBERT) with a classifier on top,\n    also takes care of multi-layer training. ',
    BERT_START_DOCSTRING,
)
class DeeBertForSequenceClassification(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.bert = DeeBertModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.bert(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            # sequence_output, pooled_output, (hidden_states), (attentions), highway exits

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]

        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 161 | 0 |
def solution(n: int = 1000) -> int:
    """
    Return the product a * b * c of the Pythagorean triplet with the largest
    product whose perimeter a + b + c equals n.
    """
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product


if __name__ == "__main__":
    print(f"{solution() = }")
| 7 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
SAMPLE_SP = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
EN_CODE = 128022
FR_CODE = 128028
@require_sentencepiece
class M2M100TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self):
        super().setUp()

        vocab = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['spm_file'])

        tokenizer = MaMaaaTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def get_tokenizer(self, **kwargs):
        return MaMaaaTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        return (
            "This is a test",
            "This is a test",
        )
    def test_convert_token_and_id(self):
        token = '</s>'
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        tokenizer = self.get_tokenizer()
        vocab_keys = list(tokenizer.get_vocab().keys())

        self.assertEqual(vocab_keys[0], '</s>')
        self.assertEqual(vocab_keys[1], '<unk>')
        self.assertEqual(vocab_keys[-1], '<s>')
        self.assertEqual(len(vocab_keys), tokenizer.vocab_size + len(tokenizer.get_added_vocab()))
@unittest.skip('''Skip this test while all models are still to be uploaded.''')
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [2, 3, 4, 5, 6])

        back_tokens = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6])
        self.assertListEqual(back_tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])

        text = tokenizer.convert_tokens_to_string(tokens)
        self.assertEqual(text, 'This is a test')
@slow
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = {'''input_ids''': [[128022, 110108, 397, 11, 38272, 2247, 124811, 285, 18105, 1586, 207, 7, 39534, 4428, 397, 1019, 18105, 1586, 207, 7, 41337, 16786, 241, 7, 20214, 17, 125690, 10398, 7, 44378, 58069, 68342, 7798, 7343, 11, 299, 33310, 4, 158, 37350, 94077, 4569, 299, 33310, 90, 4, 52840, 290, 4, 31270, 112, 299, 682, 4, 52840, 39953, 14079, 193, 52519, 90894, 17894, 120697, 11, 40445, 551, 17, 1019, 52519, 90894, 17756, 963, 11, 40445, 480, 17, 9792, 1120, 5173, 1393, 6240, 16786, 241, 120996, 28, 1245, 1393, 118240, 11123, 1019, 93612, 2691, 10618, 98058, 120409, 1928, 279, 4, 40683, 367, 178, 207, 1019, 103, 103121, 506, 65296, 5, 2], [128022, 21217, 367, 117, 125450, 128, 719, 7, 7308, 40, 93612, 12669, 1116, 16704, 71, 17785, 3699, 15592, 35, 144, 9584, 241, 11943, 713, 950, 799, 2247, 88427, 150, 149, 118813, 120706, 1019, 106906, 81518, 28, 1224, 22799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [128022, 1658, 123311, 5155, 5578, 4722, 279, 14947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_snake_case , model_name='''facebook/m2m100_418M''' , revision='''c168bae485c864188cf9aa0e4108b0b6934dc91e''' , )
@require_torch
@require_sentencepiece
@require_tokenizers
class M2M100TokenizerIntegrationTest(unittest.TestCase):
    checkpoint_name = 'facebook/m2m100_418M'
    src_text = [
        'In my opinion, there are two levels of response from the French government.',
        'NSA Affair Emphasizes Complete Lack of Debate on Intelligence',
    ]
    tgt_text = [
        'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
        'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
    ]
    # fmt: off
    expected_src_tokens = [EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2]
@classmethod
    def setUpClass(cls):
        cls.tokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang='en', tgt_lang='fr')
        cls.pad_token_id = 1
        return cls
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
self.assertEqual(self.tokenizer.get_lang_id('''ar''') , 128006)
self.assertEqual(self.tokenizer.get_lang_id('''en''') , 128022)
self.assertEqual(self.tokenizer.get_lang_id('''ro''') , 128076)
self.assertEqual(self.tokenizer.get_lang_id('''mr''') , 128063)
def lowerCamelCase ( self : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer.get_vocab()
self.assertEqual(len(_snake_case) , self.tokenizer.vocab_size)
self.assertEqual(vocab['''<unk>'''] , 3)
self.assertIn(self.tokenizer.get_lang_token('''en''') , _snake_case)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _snake_case)
def lowerCamelCase ( self : Any):
"""simple docstring"""
self.assertIn(_snake_case , self.tokenizer.all_special_ids)
# fmt: off
UpperCAmelCase_ = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 14028, 136, 3286, 9706, 6, 90797, 6, 144012, 162, 88128, 30061, 5, 2]
# fmt: on
UpperCAmelCase_ = self.tokenizer.decode(_snake_case , skip_special_tokens=_snake_case)
UpperCAmelCase_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_snake_case)
self.assertEqual(_snake_case , _snake_case)
self.assertNotIn(self.tokenizer.eos_token , _snake_case)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(_snake_case)
UpperCAmelCase_ = MaMaaaTokenizer.from_pretrained(_snake_case)
self.assertDictEqual(new_tok.lang_token_to_id , _snake_case)
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = '''en'''
UpperCAmelCase_ = '''fr'''
UpperCAmelCase_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_snake_case , return_tensors='''pt''')
UpperCAmelCase_ = shift_tokens_right(
batch['''labels'''] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id)
for k in batch:
UpperCAmelCase_ = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
UpperCAmelCase_ = '''zh'''
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
@require_torch
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''mr'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''mr''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
UpperCAmelCase_ = '''zh'''
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id('''zh''')])
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang)])
@require_torch
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
UpperCAmelCase_ = self.tokenizer._build_translation_inputs('''A test''' , return_tensors='''pt''' , src_lang='''en''' , tgt_lang='''ar''')
self.assertEqual(
nested_simplify(_snake_case) , {
# en_XX, A, test, EOS
'''input_ids''': [[128022, 58, 4183, 2]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 128006,
} , )
| 7 | 1 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, albert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = AlbertConfig.from_json_file(albert_config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = AlbertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
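# Example invocation (a sketch; the script file name and the paths below are
# placeholders, not taken from the original file):
#
#     python convert_albert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./albert_base/model.ckpt-best \
#         --albert_config_file ./albert_base/albert_config.json \
#         --pytorch_dump_path ./albert_base/pytorch_model.bin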
| 102 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
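# Behavior sketch (not part of the file): once the _LazyModule above replaces
# this package in sys.modules, importing the package stays cheap; the
# torch-dependent submodules are only imported the first time one of their
# attributes is resolved.
#
#     from transformers.models import clap   # no torch import happens yet
#     model_cls = clap.ClapModel             # first attribute access triggers
#                                            # the real import of modeling_clap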
| 161 | 0 |
import unittest
from transformers import GPTSwaTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''')
@require_sentencepiece
@require_tokenizers
class GPTSwaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSwaTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)
    def test_full_tokenizer(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on
    def test_fast_encode_decode(self):
        tokenizer = GPTSwaTokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)
@slow
def lowerCamelCase_ ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase_ = [
"""<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""",
"""Hey there, how are you doing this fine day?""",
"""This is a text with a trailing spaces followed by a dot .""",
"""Hรคj svรคjs lillebrรถr! =)""",
"""Det รคr inget fel pรฅ Mr. Cool""",
]
# fmt: off
UpperCamelCase_ = {"""input_ids""": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__UpperCamelCase , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=__UpperCamelCase , )
| 355 |
from math import pow, sqrt
def validate(*values: float) -> bool:
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    # Graham's law: rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1)
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError(
            "Input Error: Molar mass and effusion rate values must greater than 0.")
    )
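# Worked example (a sketch using the functions above; parameter naming follows
# the reconstruction in this file): by Graham's law,
# rate_H2 / rate_O2 = sqrt(M_O2 / M_H2), so hydrogen (~2.016 g/mol) effuses
# roughly four times faster than oxygen (~31.998 g/mol).
if __name__ == "__main__":
    print(effusion_ratio(2.016, 31.998))  # ~3.984
    print(first_effusion_rate(1.0, 2.016, 31.998))  # H2 rate if O2 effuses at 1.0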
| 261 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
@require_tf
def A ( self : List[str] ) -> List[Any]:
UpperCAmelCase : Tuple = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' )
UpperCAmelCase : str = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is grouped''', '''score''': 2.1E-05, '''token''': 38015, '''token_str''': ''' grouped'''},
{'''sequence''': '''My name is accuser''', '''score''': 2.1E-05, '''token''': 25506, '''token_str''': ''' accuser'''},
] , )
UpperCAmelCase : Optional[Any] = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is grouped''',
'''score''': 2.1E-05,
'''token''': 38015,
'''token_str''': ''' grouped''',
},
{
'''sequence''': '''The largest city in France is accuser''',
'''score''': 2.1E-05,
'''token''': 25506,
'''token_str''': ''' accuser''',
},
] , )
UpperCAmelCase : Any = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 13606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Patrick''', '''score''': 2E-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 1.9E-05, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
def A ( self : Optional[Any] ) -> List[Any]:
UpperCAmelCase : Union[str, Any] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' )
UpperCAmelCase : List[Any] = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is Maul''', '''score''': 2.2E-05, '''token''': 35676, '''token_str''': ''' Maul'''},
{'''sequence''': '''My name isELS''', '''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS'''},
] , )
UpperCAmelCase : int = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{
'''sequence''': '''The largest city in France is Maul''',
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
},
{'''sequence''': '''The largest city in France isELS''', '''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS'''},
] , )
UpperCAmelCase : Tuple = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 2.1E-05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Te''', '''score''': 2E-05, '''token''': 2941, '''token_str''': ''' Te'''},
{'''sequence''': '''My name is Clara''', '''score''': 2E-05, '''token''': 13606, '''token_str''': ''' Clara'''},
] , )
UpperCAmelCase : int = unmasker('''My name is <mask> <mask>''' , top_k=2 )
self.assertEqual(
nested_simplify(__snake_case , decimals=6 ) , [
[
{
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is Maul<mask></s>''',
},
{'''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''},
],
[
{
'''score''': 2.2E-05,
'''token''': 35676,
'''token_str''': ''' Maul''',
'''sequence''': '''<s>My name is<mask> Maul</s>''',
},
{'''score''': 2.2E-05, '''token''': 16416, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''},
],
] , )
@require_torch_gpu
def A ( self : Optional[Any] ) -> Optional[Any]:
UpperCAmelCase : List[Any] = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' )
# convert model to fp16
pipe.model.half()
UpperCAmelCase : List[Any] = pipe('''Paris is the [MASK] of France.''' )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(__snake_case , __snake_case )
@slow
@require_torch
def A ( self : Optional[Any] ) -> int:
UpperCAmelCase : Tuple = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' )
self.run_large_test(__snake_case )
@slow
@require_tf
def A ( self : Dict ) -> Optional[int]:
UpperCAmelCase : Tuple = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' )
self.run_large_test(__snake_case )
def A ( self : Dict , __snake_case : Any ) -> str:
UpperCAmelCase : List[str] = unmasker('''My name is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''sequence''': '''My name is John''', '''score''': 0.0_08, '''token''': 610, '''token_str''': ''' John'''},
{'''sequence''': '''My name is Chris''', '''score''': 0.0_07, '''token''': 1573, '''token_str''': ''' Chris'''},
] , )
UpperCAmelCase : Optional[int] = unmasker('''The largest city in France is <mask>''' )
self.assertEqual(
nested_simplify(__snake_case ) , [
{
'''sequence''': '''The largest city in France is Paris''',
'''score''': 0.2_51,
'''token''': 2201,
'''token_str''': ''' Paris''',
},
{
'''sequence''': '''The largest city in France is Lyon''',
'''score''': 0.2_14,
'''token''': 12790,
'''token_str''': ''' Lyon''',
},
] , )
UpperCAmelCase : Tuple = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 )
self.assertEqual(
nested_simplify(__snake_case ) , [
{'''sequence''': '''My name is Patrick''', '''score''': 0.0_05, '''token''': 3499, '''token_str''': ''' Patrick'''},
{'''sequence''': '''My name is Clara''', '''score''': 0.0_00, '''token''': 13606, '''token_str''': ''' Clara'''},
{'''sequence''': '''My name is Te''', '''score''': 0.0_00, '''token''': 2941, '''token_str''': ''' Te'''},
] , )
@require_torch
def A ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase : List[str] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' )
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Union[str, Any] = None
self.run_pipeline_test(__snake_case , [] )
@require_tf
def A ( self : Dict ) -> List[Any]:
UpperCAmelCase : List[str] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' )
UpperCAmelCase : Dict = None
UpperCAmelCase : Union[str, Any] = None
self.run_pipeline_test(__snake_case , [] )
def A ( self : Union[str, Any] , __snake_case : Optional[int] , __snake_case : int , __snake_case : Optional[int] ) -> Union[str, Any]:
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' )
UpperCAmelCase : Union[str, Any] = FillMaskPipeline(model=__snake_case , tokenizer=__snake_case )
UpperCAmelCase : Optional[Any] = [
F"""This is another {tokenizer.mask_token} test""",
]
return fill_masker, examples
def A ( self : List[str] , __snake_case : Any , __snake_case : str ) -> str:
UpperCAmelCase : int = fill_masker.tokenizer
UpperCAmelCase : Tuple = fill_masker.model
UpperCAmelCase : Optional[Any] = fill_masker(
F"""This is a {tokenizer.mask_token}""" , )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
UpperCAmelCase : int = fill_masker([F"""This is a {tokenizer.mask_token}"""] )
self.assertEqual(
__snake_case , [
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
] , )
UpperCAmelCase : Union[str, Any] = fill_masker([F"""This is a {tokenizer.mask_token}""", F"""Another {tokenizer.mask_token} great test."""] )
self.assertEqual(
__snake_case , [
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
[
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
{'''sequence''': ANY(__snake_case ), '''score''': ANY(__snake_case ), '''token''': ANY(__snake_case ), '''token_str''': ANY(__snake_case )},
],
] , )
with self.assertRaises(__snake_case ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(__snake_case ):
fill_masker('''This is''' )
self.run_test_top_k(__snake_case , __snake_case )
self.run_test_targets(__snake_case , __snake_case )
self.run_test_top_k_targets(__snake_case , __snake_case )
self.fill_mask_with_duplicate_targets_and_top_k(__snake_case , __snake_case )
self.fill_mask_with_multiple_masks(__snake_case , __snake_case )
    def run_test_targets( self , model , tokenizer ):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys() )[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , targets=targets )
        outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['''token'''] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el['''token_str'''] for el in outputs} , set(processed_targets ) )
        # Call argument
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=targets )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el['''token'''] for el in outputs} , target_ids )
        processed_targets = [tokenizer.decode([x] ) for x in target_ids]
        self.assertEqual({el['''token_str'''] for el in outputs} , set(processed_targets ) )
        # Score equivalence
        outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=targets )
        tokens = [top_mask['''token_str'''] for top_mask in outputs]
        scores = [top_mask['''score'''] for top_mask in outputs]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(tokens ) == set(targets ):
            unmasked_targets = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=tokens )
            target_scores = [top_mask['''score'''] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores ) , nested_simplify(target_scores ) )
        # Raises with invalid targets
        with self.assertRaises(ValueError ):
            outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError ):
                outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets=[''''''] )
        with self.assertRaises(ValueError ):
            outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" , targets='''''' )
    def run_test_top_k( self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer , top_k=2 )
        outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" )
        self.assertEqual(
            outputs , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs2 = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 )
        self.assertEqual(
            outputs2 , [
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
            ] , )
        self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs2 ) )
    def run_test_top_k_targets( self , model , tokenizer ):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        # top_k=2, ntargets=3
        targets = sorted(vocab.keys() )[:3]
        outputs = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=targets )
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el['''token_str'''] for el in sorted(outputs , key=lambda x : x["score"] , reverse=True )]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2 ).issubset(targets ):
            outputs2 = fill_masker(F"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=targets2 )
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs ) , nested_simplify(outputs2 ) )
    def fill_mask_with_duplicate_targets_and_top_k( self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys() )[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(F"""My name is {tokenizer.mask_token}""" , targets=targets , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # results than the number of unique targets
        self.assertEqual(len(outputs ) , 3 )
    def fill_mask_with_multiple_masks( self , model , tokenizer ):
        fill_masker = FillMaskPipeline(model=model , tokenizer=tokenizer )
        outputs = fill_masker(
            F"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                ],
                [
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                ],
                [
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                    {'''sequence''': ANY(str ), '''score''': ANY(float ), '''token''': ANY(int ), '''token_str''': ANY(str )},
                ],
            ] , )
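# --- Added illustration (hedged): a minimal fill-mask call through the public
# `pipeline` API; the checkpoint name is an example, any fill-mask model works.
# from transformers import pipeline
# unmasker = pipeline("fill-mask", model="distilroberta-base")
# for result in unmasker("Paris is the <mask> of France.", top_k=2):
#     print(result["token_str"], result["score"])  # same keys asserted above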
| 23 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits( x , bits=BITS ):
    """Expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1."""
    device = x.device
    x = (x * 2_5_5).int().clamp(0, 2_5_5)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, '''d -> d 1 1''')
    x = rearrange(x, '''b c h w -> b c 1 h w''')
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, '''b c d h w -> b (c d) h w''')
    bits = bits * 2 - 1
    return bits
def bits_to_decimal( x , bits=BITS ):
    """Expects bits from -1 to 1, outputs image tensor ranging from 0 to 1."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, '''d -> d 1 1''')
    x = rearrange(x, '''b (c d) h w -> b c d h w''', d=8)
    dec = reduce(x * mask, '''b c d h w -> b c h w''', '''sum''')
    return (dec / 2_5_5).clamp(0.0, 1.0)
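# --- Added illustration (hedged): round-tripping through the two helpers above
# reproduces the 8-bit-quantized image exactly.
if __name__ == "__main__":
    _img = torch.rand(1, 3, 4, 4)
    _restored = bits_to_decimal(decimal_to_bits(_img))
    assert torch.allclose(_restored, (_img * 2_5_5).int().float() / 2_5_5)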
def ddim_bit_scheduler_step( self, model_output, timestep, sample, eta = 0.0, use_clipped_model_output = True, generator=None, return_dict = True, ):
    if self.num_inference_steps is None:
        raise ValueError(
            '''Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler''')
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else '''cpu'''
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def __magic_name__( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase="epsilon", lowerCamelCase=None, lowerCamelCase = True, ):
__lowerCAmelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
__lowerCAmelCase , __lowerCAmelCase = torch.split(lowerCamelCase, sample.shape[1], dim=1)
else:
__lowerCAmelCase = None
# 1. compute alphas, betas
__lowerCAmelCase = self.alphas_cumprod[t]
__lowerCAmelCase = self.alphas_cumprod[t - 1] if t > 0 else self.one
__lowerCAmelCase = 1 - alpha_prod_t
__lowerCAmelCase = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
__lowerCAmelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
__lowerCAmelCase = model_output
else:
raise ValueError(F"""Unsupported prediction_type {prediction_type}.""")
# 3. Clip "predicted x_0"
__lowerCAmelCase = self.bit_scale
if self.config.clip_sample:
__lowerCAmelCase = torch.clamp(lowerCamelCase, -scale, lowerCamelCase)
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCAmelCase = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
__lowerCAmelCase = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample ยต_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
__lowerCAmelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
__lowerCAmelCase = 0
if t > 0:
__lowerCAmelCase = torch.randn(
model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=lowerCamelCase).to(model_output.device)
__lowerCAmelCase = (self._get_variance(lowerCamelCase, predicted_variance=lowerCamelCase) ** 0.5) * noise
__lowerCAmelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=lowerCamelCase, pred_original_sample=lowerCamelCase)
class BitDiffusion( DiffusionPipeline ):
    """simple docstring"""
    def __init__(self , unet , scheduler , bit_scale = 1.0 , ):
        super().__init__()
        self.bit_scale = bit_scale
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__(self , height = 2_56 , width = 2_56 , num_inference_steps = 50 , generator = None , batch_size = 1 , output_type = "pil" , return_dict = True , **kwargs , ):
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            noise_pred = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 174 | 0 |
import math
def sieve( n ) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n ) )  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
print(sieve(10**6))
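# --- Added illustration (hedged): cross-check the segmented sieve against
# plain trial division on a small bound.
def _is_prime(k: int) -> bool:
    return k >= 2 and all(k % d for d in range(2, int(k**0.5) + 1))
assert sieve(100) == [p for p in range(2, 101) if _is_prime(p)]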
| 301 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot_small-90M': 512}
def get_pairs( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    pairs = set(pairs )
    return pairs
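# --- Added illustration (hedged): the symbol pairs that drive the BPE merges.
assert get_pairs(("h", "e", "l", "l", "o") ) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}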
class BlenderbotSmallTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    def __init__( self , vocab_file , merges_file , bos_token="__start__" , eos_token="__end__" , unk_token="__unk__" , pad_token="__null__" , **kwargs , ):
        """simple docstring"""
        super().__init__(unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs )
        with open(vocab_file , encoding='''utf-8''' ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding='''utf-8''' ) as merges_handle:
            merges = merges_handle.read().split('''\n''' )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        self.cache = {}
    @property
    def vocab_size( self ) -> int:
        """simple docstring"""
        return len(self.encoder )
    def get_vocab( self ) -> Dict:
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        token = re.sub('''([.,!?()])''' , R''' \1''' , token )
        token = re.sub('''(\')''' , R''' \1 ''' , token )
        token = re.sub(R'''\s{2,}''' , ''' ''' , token )
        if "\n" in token:
            token = token.replace('''\n''' , ''' __newln__''' )
        tokens = token.split(''' ''' )
        words = []
        for token in tokens:
            if not len(token ):
                continue
            token = token.lower()
            word = tuple(token )
            word = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
            pairs = get_pairs(word )
            if not pairs:
                words.append(token )
                continue
            while True:
                bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float('''inf''' ) ) )
                if bigram not in self.bpe_ranks:
                    break
                first , second = bigram
                new_word = []
                i = 0
                while i < len(word ):
                    try:
                        j = word.index(first , i )
                        new_word.extend(word[i:j] )
                        i = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                new_word = tuple(new_word )
                word = new_word
                if len(word ) == 1:
                    break
                else:
                    pairs = get_pairs(word )
            word = '''@@ '''.join(word )
            word = word[:-4]
            self.cache[token] = word
            words.append(word )
        return " ".join(words )
    def _tokenize( self , text ):
        """simple docstring"""
        split_tokens = []
        words = re.findall(R'''\S+\n?''' , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(''' ''' ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        """simple docstring"""
        token = token.lower()
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        """simple docstring"""
        return self.decoder.get(index , self.unk_token )
    def convert_tokens_to_string( self , tokens ):
        """simple docstring"""
        out_string = ''' '''.join(tokens ).replace('''@@ ''' , '''''' ).strip()
        return out_string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        """simple docstring"""
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(vocab_file , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + '''\n''' )
        index = 0
        with open(merge_file , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!''' )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
| 301 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys( config , base_model=False ):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    '''simple docstring'''
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def prepare_img( ):
    '''simple docstring'''
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint( model_name , pytorch_dump_folder_path , base_model=True ):
    '''simple docstring'''
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 10_00
        repo_id = """huggingface/label-files"""
        filename = """imagenet-1k-id2label.json"""
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("""facebookresearch/dino:main""" , model_name )
    original_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config , base_model=base_model )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=base_model )
    # load HuggingFace model
    if base_model:
        model = ViTModel(config , add_pooling_layer=False ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img() , return_tensors="""pt""" )
    pixel_values = encoding["""pixel_values"""]
    outputs = model(pixel_values )
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values )
        assert torch.allclose(final_hidden_state_cls_token , outputs.last_hidden_state[:, 0, :] , atol=1e-1 )
    else:
        logits = original_model(pixel_values )
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits , outputs.logits , atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="dino_vitb16",
type=str,
help="Name of the model trained with DINO you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--base_model",
action="store_true",
help="Whether to only convert the base model (no projection head weights).",
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
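# --- Added illustration (hedged): typical CLI invocation; the script filename
# and the output directory are placeholders.
#   python convert_vit_dino_to_pytorch.py --model_name dino_vitb16 \
#       --pytorch_dump_folder_path ./dino_vitb16_converted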
| 165 |
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlockaD(nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        output_states = ()
        for resnet, attn in zip(self.resnets , self.attentions ):
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxDownBlockaD(nn.Module ):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            in_channels = self.in_channels if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=in_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_downsample:
            self.downsamplers_a = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , temb , deterministic=True ):
        output_states = ()
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            output_states += (hidden_states,)
        if self.add_downsample:
            hidden_states = self.downsamplers_a(hidden_states )
            output_states += (hidden_states,)
        return hidden_states, output_states
class FlaxCrossAttnUpBlockaD(nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        attentions = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ):
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )
        return hidden_states
class FlaxUpBlockaD(nn.Module ):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            res_block = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )
        return hidden_states
class FlaxUNetMidBlockaDCrossAttn(nn.Module ):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        return hidden_states
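# --- Added illustration (hedged): a minimal, runnable example of the
# flax.linen pattern the blocks above follow (dataclass fields + setup() that
# builds submodules + __call__ that applies them); the module itself is made up.
import jax
class _DemoBlock(nn.Module ):
    features: int = 8
    def setup( self ):
        self.dense = nn.Dense(self.features )
    def __call__( self , hidden_states , deterministic=True ):
        return self.dense(hidden_states )
_demo_params = _DemoBlock().init(jax.random.PRNGKey(0 ) , jnp.ones((1, 4) ) )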
| 165 | 1 |
from __future__ import annotations
import typing
from collections import Counter
def pythagorean_triple( max_perimeter ) -> typing.Counter[int]:
    """simple docstring"""
    triplets: typing.Counter[int] = Counter()
    for base in range(1 , max_perimeter + 1 ):
        for perpendicular in range(base , max_perimeter + 1 ):
            hypotenuse = (base * base + perpendicular * perpendicular) ** 0.5
            if hypotenuse == int(hypotenuse ):
                perimeter = int(base + perpendicular + hypotenuse )
                if perimeter > max_perimeter:
                    continue
                triplets[perimeter] += 1
    return triplets
def solution( max_perimeter = 1000 ) -> int:
    """simple docstring"""
    triplets = pythagorean_triple(max_perimeter )
    return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 44 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
A__ = False
class VersatileDiffusionPipelineFastTests( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class VersatileDiffusionPipelineIntegrationTests( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained( self ):
        pipe = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ,torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=True )
        prompt_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt='''first prompt''' ,image=prompt_image ,text_to_image_strength=0.75 ,generator=generator ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='''numpy''' ,).images
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname )
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname ,torch_dtype=torch.float16 )
            pipe.to(torch_device )
            pipe.set_progress_bar_config(disable=True )
        generator = generator.manual_seed(0 )
        new_image = pipe.dual_guided(
            prompt='''first prompt''' ,image=prompt_image ,text_to_image_strength=0.75 ,generator=generator ,guidance_scale=7.5 ,num_inference_steps=2 ,output_type='''numpy''' ,).images
        assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image( self ):
        pipe = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' ,torch_dtype=torch.float16 )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=True )
        prompt = '''cyberpunk 2077'''
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
        generator = torch.manual_seed(0 )
        image = pipe.dual_guided(
            prompt=prompt ,image=init_image ,text_to_image_strength=0.75 ,generator=generator ,guidance_scale=7.5 ,num_inference_steps=5_0 ,output_type='''numpy''' ,).images
        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        prompt = '''A painting of a squirrel eating a burger '''
        generator = torch.manual_seed(0 )
        image = pipe.text_to_image(
            prompt=prompt ,generator=generator ,guidance_scale=7.5 ,num_inference_steps=5_0 ,output_type='''numpy''' ).images
        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
        image = pipe.image_variation(init_image ,generator=generator ,output_type='''numpy''' ).images
        image_slice = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
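# --- Added illustration (hedged): running the pipeline outside the test suite
# needs a CUDA GPU and the shi-labs/versatile-diffusion weights, hence commented.
# pipe = VersatileDiffusionPipeline.from_pretrained(
#     "shi-labs/versatile-diffusion", torch_dtype=torch.float16
# ).to("cuda")
# out = pipe.dual_guided(prompt="cyberpunk 2077", image=init_image,
#                        text_to_image_strength=0.75, num_inference_steps=50)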
| 44 | 1 |
def solution( n : int = 1000 ) -> int:
    '''simple docstring'''
    product = -1
    candidate = 0
    for a in range(1 , n // 3 ):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 7 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export( model , model_args : tuple , output_path : Path , ordered_input_names , output_names , dynamic_axes , opset , use_external_data_format=False , ):
    '''simple docstring'''
    output_path.parent.mkdir(parents=True , exist_ok=True )
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , use_external_data_format=use_external_data_format , enable_onnx_checker=True , opset_version=opset , )
    else:
        export(
            model , model_args , f=output_path.as_posix() , input_names=ordered_input_names , output_names=output_names , dynamic_axes=dynamic_axes , do_constant_folding=True , opset_version=opset , )
@torch.no_grad()
def convert_models( model_path : str , output_path : str , opset : int , fp16 : bool = False ):
    '''simple docstring'''
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = 'cuda'
    elif fp16 and not torch.cuda.is_available():
        raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
    else:
        device = 'cpu'
    output_path = Path(output_path )
    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + '/vae' )
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder , model_args=(
            torch.randn(1 , vae_latent_channels , 25 , 25 ).to(device=device , dtype=dtype ),
            False,
        ) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
            'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
        } , opset=opset , )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_path",
type=str,
required=True,
help="Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).",
)
parser.add_argument("--output_path", type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--opset",
default=14,
type=int,
help="The version of the ONNX operator set to use.",
)
parser.add_argument("--fp16", action="store_true", default=False, help="Export the models in `float16` mode")
    args = parser.parse_args()
    print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print("SD: Done: ONNX")
| 7 | 1 |
import argparse
from omegaconf import OmegaConf
import torch
from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original( checkpoint_path , config_path , output_path ):
    '''simple docstring'''
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path , map_location="cpu")["model"]
    keys = list(state_dict.keys())
    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key , "")] = state_dict[key]
    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key , "")] = state_dict[key]
    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params
    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)
    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)
    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps , beta_schedule="scaled_linear" , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=False , )
    pipeline = LDMPipeline(vqvae , unet , noise_scheduler)
    pipeline.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', type=str, required=True)
parser.add_argument('--config_path', type=str, required=True)
parser.add_argument('--output_path', type=str, required=True)
    args = parser.parse_args()
    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 151 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_ ):
    '''simple docstring'''
    return EnvironmentCommand()
def download_command_factory(args ):
    '''simple docstring'''
    return EnvironmentCommand(args.accelerate_config_file )
class EnvironmentCommand( BaseTransformersCLICommand ):
    '''simple docstring'''
@staticmethod
    def register_subcommand( parser :ArgumentParser ) -> None:
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
        download_parser.add_argument(
            "--accelerate-config_file" , default=None , help="The accelerate config file to use for the default values in the launching script." , )
        download_parser.set_defaults(func=download_command_factory )
    def __init__( self , accelerate_config_file , *args ) -> None:
        self._accelerate_config_file = accelerate_config_file
    def run( self ):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors
            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors" ) is not None:
            import safetensors
            safetensors_version = f'{safetensors.__version__} but is ignored because of PyTorch version too old.'
        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file
            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file ):
                accelerate_config = load_config_from_file(self._accelerate_config_file ).to_dict()
            accelerate_config_str = (
                "\n".join([f'\t- {prop}: {val}' for prop, val in accelerate_config.items()] )
                if isinstance(accelerate_config , dict )
                else f'\t{accelerate_config}'
            )
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf
            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU" ) )
        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib
            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform
        info = {
"`transformers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Huggingface_hub version": huggingface_hub.__version__,
"Safetensors version": f'{safetensors_version}',
"Accelerate version": f'{accelerate_version}',
"Accelerate config": f'{accelerate_config_str}',
"PyTorch version (GPU?)": f'{pt_version} ({pt_cuda_available})',
"Tensorflow version (GPU?)": f'{tf_version} ({tf_cuda_available})',
"Flax version (CPU?/GPU?/TPU?)": f'{flax_version} ({jax_backend})',
"Jax version": f'{jax_version}',
"JaxLib version": f'{jaxlib_version}',
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
return info
@staticmethod
    def format_dict( d :dict ) -> str:
        return "\n".join([f'- {prop}: {val}' for prop, val in d.items()] ) + "\n"
| 151 | 1 |
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class TextClassification( TaskTemplate ):
    """simple docstring"""
    task: str = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
    input_schema: ClassVar[Features] = Features({"text": Value("string" )} )
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel} )
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features( self , features ):
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
return task_template
@property
    def column_mapping( self ) -> Dict[str, str]:
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
}
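# --- Added illustration (hedged): aligning the template with real features;
# assumes the surrounding `datasets` package.
# features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# task = TextClassification(text_column="text", label_column="labels")
# aligned = task.align_with_features(features)  # copies the ClassLabel into label_schema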
| 108 |
"""simple docstring"""
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    assert hf_shape == value.shape, (
        F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        F" {value.shape} for {full_name}"
    )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
    adapter = hf_model.adapter
for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."] ):
            load_adapter(name , value , adapter , unused_weights )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )
logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                F"{full_name} has size {value.shape}, but"
                F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
def load_adapter( full_name , value , adapter , unused_weights ):
    name = full_name.split("adaptor." )[-1]
    items = name.split("." )
    if items[1].isdigit():
        layer_id = int(items[1] )
    else:
        layer_id = None
    if "adaptor" not in full_name:
        if "proj_ln" in full_name:
            # has to be layer norm
            if "bias" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.bias.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found."
                adapter.proj_layer_norm.bias.data = value
                logger.info(F"Adapter proj layer norm bias was initialized from {full_name}." )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj_layer_norm.weight.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found."
                adapter.proj_layer_norm.weight.data = value
        else:
            # has to be projection layer
            if "bias" in name:
                assert (
                    value.shape == adapter.proj.bias.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found."
                adapter.proj.bias.data = value
                logger.info(F"Adapter proj layer bias was initialized from {full_name}." )
            if "weight" in name:
                assert (
                    value.shape == adapter.proj.weight.data.shape
                ), F"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found."
                adapter.proj.weight.data = value
                logger.info(F"Adapter proj layer weight was initialized from {full_name}." )
    elif isinstance(layer_id , int ):
        if "bias" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.bias.data.shape
            ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found."
            adapter.layers[layer_id].conv.bias.data = value
            logger.info(F"Adapter layer {layer_id} bias was initialized from {full_name}." )
        elif "weight" in name:
            assert (
                value.shape == adapter.layers[layer_id].conv.weight.data.shape
            ), F"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found."
            adapter.layers[layer_id].conv.weight.data = value
            logger.info(F"Adapter layer {layer_id} weight was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
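# Usage sketch (shapes are illustrative): reuse a token-embedding matrix as an
# output projection. Note that assigning `.weight.data` replaces the parameter
# tensor outright, so the resulting layer maps hidden states of size emb_size to
# vocab_size logits while sharing storage with the embedding.
# emb = nn.Embedding(250054, 1024)
# lm_head = make_linear_from_emb(emb)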
@torch.no_grad()
def convert_wavaveca_checkpoint( checkpoint_path , pytorch_dump_folder_path , dict_path , config_yaml_path , encoder_config_path , decoder_config_path , add_adapter , adapter_kernel_size , adapter_stride , decoder_start_token_id , encoder_output_dim , ):
    encoder_config = WavaVecaConfig.from_pretrained(
        encoder_config_path , add_adapter=add_adapter , adapter_stride=adapter_stride , adapter_kernel_size=adapter_kernel_size , use_auth_token=True , output_hidden_size=encoder_output_dim , )
    decoder_config = MBartConfig.from_pretrained(decoder_config_path )
    # load model
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={
            "config_yaml": config_yaml_path,
            "data": "/".join(dict_path.split("/" )[:-1] ),
            "w2v_path": checkpoint_path,
            "load_pretrained_decoder_from": None,
        } , )
    model = model[0].eval()
    # load feature extractor
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(encoder_config_path , use_auth_token=True )
    # set weights for wav2vec2 encoder
    hf_encoder = WavaVecaModel(encoder_config )
    recursively_load_weights_wavaveca(model.encoder , hf_encoder )
    # load decoder weights
    hf_decoder = MBartForCausalLM(decoder_config )
    missing_keys , unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=False )
    logger.warning(F"The following keys are missing when loading the decoder weights: {missing_keys}" )
    logger.warning(F"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
    hf_wavavec = SpeechEncoderDecoderModel(encoder=hf_encoder , decoder=hf_decoder )
    hf_wavavec.config.tie_word_embeddings = False
    tokenizer = MBartaaTokenizer(dict_path )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    config = hf_wavavec.config.to_dict()
    config["pad_token_id"] = tokenizer.pad_token_id
    config["bos_token_id"] = tokenizer.bos_token_id
    config["eos_token_id"] = tokenizer.eos_token_id
    config["tokenizer_class"] = "mbart50"
    config["feature_extractor_type"] = "wav2vec2"
    config["decoder_start_token_id"] = tokenizer.eos_token_id
    config["forced_bos_token_id"] = 250004
    config["forced_eos_token_id"] = tokenizer.eos_token_id
    hf_wavavec.config = SpeechEncoderDecoderConfig.from_dict(config )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
    feature_extractor.save_pretrained(pytorch_dump_folder_path )
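# Example invocation (the script name and all paths are placeholders for a
# locally downloaded fairseq checkpoint and its dictionary/config files):
# python convert_wav2vec2_mbart_checkpoint.py \
#     --checkpoint_path ./checkpoint_best.pt \
#     --dict_path ./dict.mbart50.txt \
#     --config_yaml_path ./config.yaml \
#     --pytorch_dump_folder_path ./wav2vec2-xls-r-mbart50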
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=1024, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=250004, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
| 261 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
"""google/switch-base-8""": """https://huggingface.co/google/switch-base-8/blob/main/config.json""",
}
class _lowerCAmelCase ( PretrainedConfig ):
    model_type = '''switch_transformers'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}

    def __init__( self , vocab_size=32128 , d_model=768 , d_kv=64 , d_ff=2048 , expert_capacity=64 , num_layers=12 , num_sparse_encoder_layers=3 , num_decoder_layers=12 , num_sparse_decoder_layers=3 , num_heads=12 , num_experts=8 , router_bias=False , router_jitter_noise=0.01 , router_dtype="float32" , router_ignore_padding_tokens=False , relative_attention_num_buckets=32 , relative_attention_max_distance=128 , dropout_rate=0.1 , layer_norm_epsilon=1e-6 , router_z_loss_coef=0.001 , router_aux_loss_coef=0.001 , initializer_factor=1.0 , feed_forward_proj="relu" , is_encoder_decoder=True , add_router_probs=False , use_cache=True , pad_token_id=0 , eos_token_id=1 , **kwargs , ) -> None:
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us after how many encoder layers a sparse (MoE) layer is inserted.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us after how many decoder layers a sparse (MoE) layer is inserted.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(F'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-" )
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info ) > 1 and act_info[0] != "gated" or len(act_info ) > 2:
            raise ValueError(
                F'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'" )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , **kwargs , )
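# Quick usage sketch (values are illustrative): with 12 layers and 3 sparse
# encoder layers, every 4th encoder block gets a mixture-of-experts feed-forward.
# config = _lowerCAmelCase(num_layers=12, num_sparse_encoder_layers=3)
# assert config.encoder_sparse_step == 4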
| 112 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint( checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ):
    """simple docstring"""
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata["model_config"] )
    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location="cpu" )["module"]
    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path )
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values() ) + 1
    config.entity_vocab_size += 1
    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>" , lstrip=False , rstrip=False )
    entity_token_2 = AddedToken("<ent2>" , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]} )
    config.vocab_size += 2
    print(F'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )
    with open(os.path.join(pytorch_dump_folder_path , "tokenizer_config.json" ) , "r" ) as f:
        tokenizer_config = json.load(f )
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path , "tokenizer_config.json" ) , "w" ) as f:
        json.dump(tokenizer_config , f )
    with open(os.path.join(pytorch_dump_folder_path , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
        json.dump(entity_vocab , f )
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"] )[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"] )[0]
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0 )
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0 )
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb] )
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0 )
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0 )
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias] )
    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = F'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]
    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb] )
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias] )
    model = LukeForMaskedLM(config=config ).eval()
    state_dict.pop("entity_predictions.decoder.weight" )
    state_dict.pop("lm_head.decoder.weight" )
    state_dict.pop("lm_head.decoder.bias" )
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
            state_dict_for_hugging_face["luke." + key] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]
    missing_keys , unexpected_keys = model.load_state_dict(state_dict_for_hugging_face , strict=False )
    if set(unexpected_keys ) != {"luke.embeddings.position_ids"}:
        raise ValueError(F'Unexpected unexpected_keys: {unexpected_keys}' )
    if set(missing_keys ) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(F'Unexpected missing_keys: {missing_keys}' )
    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path , task="entity_classification" )
    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors="pt" )
    outputs = model(**encoding )
    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768) )
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            F' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4 ):
        raise ValueError
    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path )
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text , entity_spans=[span] , return_tensors="pt" )
    outputs = model(**encoding )
    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1 )
    assert "Japan" == tokenizer.decode(predicted_id )
    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )
def load_original_entity_vocab( entity_vocab_path ):
    """simple docstring"""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]
    data = [json.loads(line ) for line in open(entity_vocab_path )]
    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = F'{language}:{entity_name}'
            new_mapping[new_entity_name] = entity_id
    return new_mapping
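# Sketch of the mapping this produces (ids are illustrative): special tokens are
# stored once, while each named entity is keyed per language but shares the
# original entity id, e.g.
# {"[MASK]": 0, "en:Japan": 17, "ja:Japan": 17, ...}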
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 112 | 1 |
"""simple docstring"""
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {'''add_prefix_space''': True}
    test_seq2seq = False
    def setUp( self ) -> None:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
"""<|endoftext|>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
def A__ ( self , snake_case_ ) -> Any:
__lowerCAmelCase = """lower newer"""
__lowerCAmelCase = """lower newer"""
return input_text, output_text
def A__ ( self ) -> Dict:
__lowerCAmelCase = CodeGenTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCAmelCase = """lower newer"""
__lowerCAmelCase = ["""\u0120low""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
__lowerCAmelCase = tokenizer.tokenize(snake_case_ , add_prefix_space=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
__lowerCAmelCase = tokens + [tokenizer.unk_token]
__lowerCAmelCase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
def A__ ( self ) -> Dict:
if not self.test_rust_tokenizer:
return
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_rust_tokenizer(add_prefix_space=snake_case_ )
__lowerCAmelCase = """lower newer"""
# Testing tokenization
__lowerCAmelCase = tokenizer.tokenize(snake_case_ , add_prefix_space=snake_case_ )
__lowerCAmelCase = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids without special tokens
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ , add_prefix_space=snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing conversion to ids with special tokens
__lowerCAmelCase = self.get_rust_tokenizer(add_prefix_space=snake_case_ )
__lowerCAmelCase = tokenizer.encode(snake_case_ , add_prefix_space=snake_case_ )
__lowerCAmelCase = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# Testing the unknown token
__lowerCAmelCase = tokens + [rust_tokenizer.unk_token]
__lowerCAmelCase = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(snake_case_ ) , snake_case_ )
def A__ ( self , *snake_case_ , **snake_case_ ) -> Optional[Any]:
# It's very difficult to mix/test pretokenization with byte-level
# And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def A__ ( self , snake_case_=15 ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
# Simple input
__lowerCAmelCase = """This is a simple input"""
__lowerCAmelCase = ["""This is a simple input 1""", """This is a simple input 2"""]
__lowerCAmelCase = ("""This is a simple input""", """This is a pair""")
__lowerCAmelCase = [
("""This is a simple input 1""", """This is a simple input 2"""),
("""This is a simple pair 1""", """This is a simple pair 2"""),
]
# Simple input tests
self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding="""max_length""" )
# Simple input
self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding="""max_length""" )
# Simple input
self.assertRaises(
snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding="""max_length""" , )
# Pair input
self.assertRaises(snake_case_ , tokenizer_r.encode , snake_case_ , max_length=snake_case_ , padding="""max_length""" )
# Pair input
self.assertRaises(snake_case_ , tokenizer_r.encode_plus , snake_case_ , max_length=snake_case_ , padding="""max_length""" )
# Pair input
self.assertRaises(
snake_case_ , tokenizer_r.batch_encode_plus , snake_case_ , max_length=snake_case_ , padding="""max_length""" , )
def A__ ( self ) -> Union[str, Any]:
__lowerCAmelCase = CodeGenTokenizer.from_pretrained(self.tmpdirname , pad_token="""<pad>""" )
# Simple input
__lowerCAmelCase = """This is a simple input"""
__lowerCAmelCase = ["""This is a simple input looooooooong""", """This is a simple input"""]
__lowerCAmelCase = ("""This is a simple input""", """This is a pair""")
__lowerCAmelCase = [
("""This is a simple input loooooong""", """This is a simple input"""),
("""This is a simple pair loooooong""", """This is a simple pair"""),
]
__lowerCAmelCase = tokenizer.pad_token_id
__lowerCAmelCase = tokenizer(snake_case_ , padding="""max_length""" , max_length=30 , return_tensors="""np""" )
__lowerCAmelCase = tokenizer(snake_case_ , padding=snake_case_ , truncate=snake_case_ , return_tensors="""np""" )
__lowerCAmelCase = tokenizer(*snake_case_ , padding="""max_length""" , max_length=60 , return_tensors="""np""" )
__lowerCAmelCase = tokenizer(snake_case_ , padding=snake_case_ , truncate=snake_case_ , return_tensors="""np""" )
# s
# test single string max_length padding
self.assertEqual(out_s["""input_ids"""].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["""input_ids"""] )
self.assertTrue(0 in out_s["""attention_mask"""] )
# s2
# test automatic padding
self.assertEqual(out_sa["""input_ids"""].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["""input_ids"""][0] )
self.assertFalse(0 in out_sa["""attention_mask"""][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["""input_ids"""][1] )
self.assertTrue(0 in out_sa["""attention_mask"""][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["""input_ids"""].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["""input_ids"""] )
self.assertTrue(0 in out_p["""attention_mask"""] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["""input_ids"""].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["""input_ids"""][0] )
self.assertFalse(0 in out_pa["""attention_mask"""][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["""input_ids"""][1] )
self.assertTrue(0 in out_pa["""attention_mask"""][1] )
def A__ ( self ) -> int:
__lowerCAmelCase = """$$$"""
__lowerCAmelCase = CodeGenTokenizer.from_pretrained(self.tmpdirname , bos_token=snake_case_ , add_bos_token=snake_case_ )
__lowerCAmelCase = """This is a simple input"""
__lowerCAmelCase = ["""This is a simple input 1""", """This is a simple input 2"""]
__lowerCAmelCase = tokenizer.bos_token_id
__lowerCAmelCase = tokenizer(snake_case_ )
__lowerCAmelCase = tokenizer(snake_case_ )
self.assertEqual(out_s.input_ids[0] , snake_case_ )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
__lowerCAmelCase = tokenizer.decode(out_s.input_ids )
__lowerCAmelCase = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , snake_case_ )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
@slow
def A__ ( self ) -> int:
__lowerCAmelCase = CodeGenTokenizer.from_pretrained("""Salesforce/codegen-350M-mono""" )
__lowerCAmelCase = """\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"""
__lowerCAmelCase = """\nif len_a > len_b: result = a\nelse: result = b"""
__lowerCAmelCase = tokenizer.encode(snake_case_ )
__lowerCAmelCase = ["""^#""", re.escape("""<|endoftext|>""" ), """^'''""", """^\"\"\"""", """\n\n\n"""]
__lowerCAmelCase = tokenizer.decode(snake_case_ , truncate_before_pattern=snake_case_ )
self.assertEqual(snake_case_ , snake_case_ )
def A__ ( self ) -> str:
pass
| 301 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
SCREAMING_SNAKE_CASE_ = open # noqa: we just need to have a builtin inside this module to test it properly
| 301 | 1 |
"""simple docstring"""
def _a ( column_title: str ) -> int:
    # Convert an Excel-style column title (e.g. "A", "AB") to its 1-based column number.
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26 , power )
        answer += value
        power += 1
        index -= 1
    return answer
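# Worked examples (standard Excel numbering): "A" -> 1, "Z" -> 26, "AA" -> 27,
# and "AB" -> 1 * 26 + 2 = 28.
# assert _a("AB") == 28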
if __name__ == "__main__":
from doctest import testmod
testmod()
| 233 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node :
    '''simple docstring'''
    def __init__( self , value: int ) ->None:
        """simple docstring"""
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None

class Tree :
    '''simple docstring'''
    def __init__( self , tree: Node ) ->None:
        """simple docstring"""
        self.tree = tree

    def depth_first_search( self , node: Node | None ) ->int:
        """Return the sum of all node values reachable from ``node``."""
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left ) + self.depth_first_search(node.right )
        )

    def __iter__( self ) ->Iterator[int]:
        """simple docstring"""
        yield self.depth_first_search(self.tree )
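# Usage sketch: a three-node tree sums to 6 via the depth-first traversal above.
# root = Node(1)
# root.left, root.right = Node(2), Node(3)
# assert next(iter(Tree(root))) == 6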
if __name__ == "__main__":
import doctest
doctest.testmod()
| 233 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_informer': [
        'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'InformerConfig',
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_informer'] = [
        'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
        'InformerForPrediction',
        'InformerModel',
        'InformerPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
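# Note: the _LazyModule indirection defers the torch-dependent imports above until
# an attribute such as `InformerModel` is first accessed, which keeps importing
# the package cheap when only configuration classes are needed.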
| 44 | """simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
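# enable_full_determinism() seeds the RNGs and switches PyTorch to deterministic
# kernels so the pixel-level slice comparisons below are reproducible across runs.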
class __A ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyImgaImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size( self ):
        return 32
    @property
    def time_input_dim( self ):
        return 32
    @property
    def block_out_channels_a( self ):
        return self.time_input_dim
    @property
    def time_embed_dim( self ):
        return self.time_input_dim * 4
    @property
    def cross_attention_dim( self ):
        return 100
@property
    def dummy_tokenizer( self ):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" )
        return tokenizer
@property
    def dummy_text_encoder( self ):
        torch.manual_seed(0 )
        config = MCLIPConfig(
            numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1005 , )
        text_encoder = MultilingualCLIP(config )
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet( self ):
        torch.manual_seed(0 )
        model_kwargs = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """text_image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """text_image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
        model = UNetaDConditionModel(**model_kwargs )
return model
@property
    def dummy_movq_kwargs( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
    def dummy_movq( self ):
        torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
    def get_dummy_components( self ):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        ddim_config = {
            """num_train_timesteps""": 1000,
            """beta_schedule""": """linear""",
            """beta_start""": 0.00085,
            """beta_end""": 0.012,
            """clip_sample""": False,
            """set_alpha_to_one""": False,
            """steps_offset""": 0,
            """prediction_type""": """epsilon""",
            """thresholding""": False,
        }
        scheduler = DDIMScheduler(**ddim_config )
        components = {
            """text_encoder""": text_encoder,
            """tokenizer""": tokenizer,
            """unet""": unet,
            """scheduler""": scheduler,
            """movq""": movq,
        }
        return components
    def get_dummy_inputs( self , device , seed=0 ):
        image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert("""RGB""" ).resize((256, 256) )
        if str(device ).startswith("""mps""" ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            """prompt""": """horse""",
            """image""": init_image,
            """image_embeds""": image_embeds,
            """negative_image_embeds""": negative_image_embeds,
            """generator""": generator,
            """height""": 64,
            """width""": 64,
            """num_inference_steps""": 10,
            """guidance_scale""": 7.0,
            """strength""": 0.2,
            """output_type""": """np""",
        }
        return inputs
    def test_kandinsky_img2img( self ):
        device = """cpu"""
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
    def tearDown( self ):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img( self ):
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinsky/kandinsky_img2img_frog.npy""" )
        init_image = load_image(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
        prompt = """A red cartoon frog, 4k"""
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyImgaImgPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device="""cpu""" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        output = pipeline(
            prompt , image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image , expected_image )
| 44 | 1 |
from __future__ import annotations
import math
def is_prime( number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes greater than 3 are of the form 6k +/- 1, so only those
    # candidates (5, 7, 11, 13, ...) need to be probed as divisors.
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

def list_truncated_nums( n: int ) -> list[int]:
    str_num = str(n )
    list_nums = [n]
    for i in range(1 , len(str_num ) ):
        list_nums.append(int(str_num[i:] ) )
        list_nums.append(int(str_num[:-i] ) )
    return list_nums

def validate( n: int ) -> bool:
    if len(str(n ) ) > 3:
        if not is_prime(int(str(n )[-3:] ) ) or not is_prime(int(str(n )[:3] ) ):
            return False
    return True

def compute_truncated_primes( count: int = 11 ) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes ) != count:
        if validate(num ):
            list_nums = list_truncated_nums(num )
            if all(is_prime(i ) for i in list_nums ):
                list_truncated_primes.append(num )
        num += 2
    return list_truncated_primes

def solution() -> int:
    return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F'{sum(compute_truncated_primes(11)) = }')
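# For reference, the eleven truncatable primes (Project Euler 37) are
# 23, 37, 53, 73, 313, 317, 373, 797, 3137, 3797 and 739397, summing to 748317.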
| 370 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs , ):
        """simple docstring"""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )

    def build_inputs_with_special_tokens(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None , already_has_special_tokens: bool = False ):
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences(self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        """simple docstring"""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        """simple docstring"""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
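# Usage sketch (token ids are illustrative): for a single sequence the tokenizer
# produces [CLS] tokens [SEP]; for a pair it produces [CLS] a [SEP] b [SEP],
# with token_type_ids 0 for the first segment and 1 for the second.
# tok = SCREAMING_SNAKE_CASE__.from_pretrained("allegro/herbert-base-cased")
# tok.build_inputs_with_special_tokens([10, 11])   # -> [cls_id, 10, 11, sep_id]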
| 238 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback ( TrainerCallback ):
    '''simple docstring'''
    def __init__( self ) -> None:
        self.events = []
    def on_init_end( self , args , state , control , **kwargs ):
        self.events.append('on_init_end' )
    def on_train_begin( self , args , state , control , **kwargs ):
        self.events.append('on_train_begin' )
    def on_train_end( self , args , state , control , **kwargs ):
        self.events.append('on_train_end' )
    def on_epoch_begin( self , args , state , control , **kwargs ):
        self.events.append('on_epoch_begin' )
    def on_epoch_end( self , args , state , control , **kwargs ):
        self.events.append('on_epoch_end' )
    def on_step_begin( self , args , state , control , **kwargs ):
        self.events.append('on_step_begin' )
    def on_step_end( self , args , state , control , **kwargs ):
        self.events.append('on_step_end' )
    def on_evaluate( self , args , state , control , **kwargs ):
        self.events.append('on_evaluate' )
    def on_predict( self , args , state , control , **kwargs ):
        self.events.append('on_predict' )
    def on_save( self , args , state , control , **kwargs ):
        self.events.append('on_save' )
    def on_log( self , args , state , control , **kwargs ):
        self.events.append('on_log' )
    def on_prediction_step( self , args , state , control , **kwargs ):
        self.events.append('on_prediction_step' )
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ) -> None:
        self.output_dir = tempfile.mkdtemp()

    def tearDown( self ) -> None:
        shutil.rmtree(self.output_dir )
    def get_trainer( self , a=0 , b=0 , train_len=64 , eval_len=64 , callbacks=None , disable_tqdm=False , **kwargs ):
        # disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
        # it's set to False since the tests later on depend on its value.
        train_dataset = RegressionDataset(length=train_len )
        eval_dataset = RegressionDataset(length=eval_len )
        config = RegressionModelConfig(a=a , b=b )
        model = RegressionPreTrainedModel(config )
        args = TrainingArguments(self.output_dir , disable_tqdm=disable_tqdm , report_to=[] , **kwargs )
        return Trainer(
            model , args , train_dataset=train_dataset , eval_dataset=eval_dataset , callbacks=callbacks , )
    def check_callbacks_equality( self , cbs1 , cbs2 ) -> None:
        self.assertEqual(len(cbs1 ) , len(cbs2 ) )
        # Order doesn't matter
        cbs1 = sorted(cbs1 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        cbs2 = sorted(cbs2 , key=lambda cb: cb.__name__ if isinstance(cb , type ) else cb.__class__.__name__ )
        for cb1, cb2 in zip(cbs1 , cbs2 ):
            if isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2 )
            elif isinstance(cb1 , type ) and not isinstance(cb2 , type ):
                self.assertEqual(cb1 , cb2.__class__ )
            elif not isinstance(cb1 , type ) and isinstance(cb2 , type ):
                self.assertEqual(cb1.__class__ , cb2 )
            else:
                self.assertEqual(cb1 , cb2 )
    def get_expected_events( self , trainer ):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader() )
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs ):
            expected_events.append('on_epoch_begin' )
            for _ in range(train_dl_len ):
                step += 1
                expected_events += ["on_step_begin", "on_step_end"]
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('on_log' )
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('on_save' )
            expected_events.append('on_epoch_end' )
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ["on_log", "on_train_end"]
        return expected_events
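    # For a minimal run (1 epoch, 2 steps, no evaluation) the expected sequence is
    # roughly: ['on_init_end', 'on_train_begin', 'on_epoch_begin', 'on_step_begin',
    # 'on_step_end', 'on_step_begin', 'on_step_end', 'on_epoch_end', 'on_log',
    # 'on_train_end'] (log/save entries depend on logging_steps and save_steps).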
    def test_init_callback( self ) -> None:
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        expected_callbacks.append(MyTestTrainerCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True )
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_add_remove_callback( self ) -> None:
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()
        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback )
        expected_callbacks.remove(DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback )
        self.assertEqual(cb.__class__ , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(DefaultFlowCallback )
        expected_callbacks.insert(0 , DefaultFlowCallback )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb )
        expected_callbacks.remove(cb )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1 )
        self.assertEqual(cb1 , cb2 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
        trainer.add_callback(cb1 )
        expected_callbacks.insert(0 , cb1 )
        self.check_callbacks_equality(trainer.callback_handler.callbacks , expected_callbacks )
    def test_event_flow( self ) -> None:
        import warnings
        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore' , category=UserWarning )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events , self.get_expected_events(trainer ) )
        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
            assert str(MyTestTrainerCallback ) in warn_mock.call_args[0][0]
| 151 |
'''simple docstring'''
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class A_ :
'''simple docstring'''
    cache_dir : Optional[Union[str, Path]] = None
    force_download : bool = False
    resume_download : bool = False
    local_files_only : bool = False
    proxies : Optional[Dict] = None
    user_agent : Optional[str] = None
    extract_compressed_file : bool = False
    force_extract : bool = False
    delete_extracted : bool = False
    use_etag : bool = True
    num_proc : Optional[int] = None
    max_retries : int = 1
    use_auth_token : Optional[Union[str, bool]] = None
    ignore_url_params : bool = False
    storage_options : Optional[Dict] = None
    download_desc : Optional[str] = None

    def copy( self ) -> "DownloadConfig":
        return self.__class__(**{k: copy.deepcopy(v ) for k, v in self.__dict__.items()} )
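    # Usage sketch: `copy()` returns a deep, independent clone, so mutating the
    # clone's mutable fields (e.g. `storage_options`) cannot leak back into the
    # original config:
    # base = A_(storage_options={"anon": True})
    # clone = base.copy()
    # clone.storage_options["anon"] = False   # `base` is unchanged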
| 151 | 1 |
"""simple docstring"""
from manim import *
class UpperCAmelCase_ ( Scene ):
def _UpperCamelCase ( self : Union[str, Any] ) -> Any:
_UpperCamelCase = Rectangle(height=0.5 , width=0.5 )
_UpperCamelCase = Rectangle(height=0.2_5 , width=0.2_5 )
_UpperCamelCase = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 )
_UpperCamelCase = [mem.copy() for i in range(6 )]
_UpperCamelCase = [mem.copy() for i in range(6 )]
_UpperCamelCase = VGroup(*__a ).arrange(__a , buff=0 )
_UpperCamelCase = VGroup(*__a ).arrange(__a , buff=0 )
_UpperCamelCase = VGroup(__a , __a ).arrange(__a , buff=0 )
_UpperCamelCase = Text('''CPU''' , font_size=24 )
_UpperCamelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__a )
_UpperCamelCase = [mem.copy() for i in range(4 )]
_UpperCamelCase = VGroup(*__a ).arrange(__a , buff=0 )
_UpperCamelCase = Text('''GPU''' , font_size=24 )
_UpperCamelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
gpu.move_to([-1, -1, 0] )
self.add(__a )
_UpperCamelCase = [mem.copy() for i in range(6 )]
_UpperCamelCase = VGroup(*__a ).arrange(__a , buff=0 )
_UpperCamelCase = Text('''Model''' , font_size=24 )
_UpperCamelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
model.move_to([3, -1.0, 0] )
self.add(__a )
_UpperCamelCase = []
_UpperCamelCase = []
_UpperCamelCase = []
for i, rect in enumerate(__a ):
rect.set_stroke(__a )
_UpperCamelCase = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 ).set_fill(__a , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=__a )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(model_cpu_arr[0] , direction=__a , buff=0.0 )
else:
cpu_target.next_to(model_cpu_arr[i - 1] , direction=__a , buff=0.0 )
self.add(__a )
model_cpu_arr.append(__a )
self.add(*__a , *__a , *__a )
_UpperCamelCase = [mem.copy() for i in range(6 )]
_UpperCamelCase = VGroup(*__a ).arrange(__a , buff=0 )
_UpperCamelCase = Text('''Loaded Checkpoint''' , font_size=24 )
_UpperCamelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
checkpoint.move_to([3, 0.5, 0] )
self.add(__a )
_UpperCamelCase = []
_UpperCamelCase = []
for i, rect in enumerate(__a ):
_UpperCamelCase = fill.copy().set_fill(__a , opacity=0.7 )
target.move_to(__a )
ckpt_arr.append(__a )
_UpperCamelCase = target.copy()
if i < 5:
cpu_target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.move_to(cpu_right_col_base[i - 5] )
ckpt_cpu_arr.append(__a )
self.add(*__a , *__a )
_UpperCamelCase = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
_UpperCamelCase = MarkupText(
    F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__a , __a )
_UpperCamelCase = MarkupText(
    F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(__a , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(__a )
_UpperCamelCase = MarkupText(
F'''Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
_UpperCamelCase = [meta_mem.copy() for i in range(6 )]
_UpperCamelCase = [meta_mem.copy() for i in range(6 )]
_UpperCamelCase = VGroup(*__a ).arrange(__a , buff=0 )
_UpperCamelCase = VGroup(*__a ).arrange(__a , buff=0 )
_UpperCamelCase = VGroup(__a , __a ).arrange(__a , buff=0 )
_UpperCamelCase = Text('''Disk''' , font_size=24 )
_UpperCamelCase = Group(__a , __a ).arrange(__a , buff=0.5 , aligned_edge=__a )
disk.move_to([-4.0, -1.2_5, 0] )
self.play(Write(__a , run_time=3 ) , Write(__a , run_time=1 ) , Create(__a , run_time=1 ) )
_UpperCamelCase = []
for i, rect in enumerate(__a ):
_UpperCamelCase = rect.copy()
target.generate_target()
target.target.move_to(disk_left_col_base[i] ).scale(0.5 )
animations.append(MoveToTarget(__a , run_time=1.5 ) )
self.play(*__a )
self.play(FadeOut(__a ) )
_UpperCamelCase = MarkupText(F'''Then, the checkpoint is removed from memory\nthrough garbage collection.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(__a , run_time=3 ) )
self.play(
FadeOut(__a , __a , *__a , *__a ) , )
self.wait()
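
# Hedged usage note (added; the module file name is an assumption): scenes like the
# one above are typically rendered from the command line with Manim, e.g.
#
#     manim -pql big_model_inference.py UpperCAmelCase_
#
# where -p previews the finished video and -ql renders at low quality for fast iteration.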
| 361 | """simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
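
# Hedged illustration (added; not part of the original shim): downstream code should
# migrate to the import path named in the deprecation message above, i.e.
#
#     from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput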
| 54 | 0 |
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def _get_tensors( self , length ):
        """simple docstring"""
        batch_size = 3
        vocab_size = 2_5_0
        input_ids = ids_tensor((batch_size, length) , vocab_size )
        scores = torch.ones((batch_size, length) , device=torch_device , dtype=torch.float ) / length
        return input_ids, scores
    def test_list_criteria( self ):
        """simple docstring"""
        input_ids , scores = self._get_tensors(5 )
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=1_0 ),
                MaxTimeCriteria(max_time=0.1 ),
            ] )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(1_0 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_length_criteria( self ):
        """simple docstring"""
        criteria = MaxLengthCriteria(max_length=1_0 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(1_0 )
        self.assertTrue(criteria(input_ids , scores ) )
    def test_max_new_tokens_criteria( self ):
        """simple docstring"""
        criteria = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
        input_ids , scores = self._get_tensors(5 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(9 )
        self.assertFalse(criteria(input_ids , scores ) )
        input_ids , scores = self._get_tensors(1_0 )
        self.assertTrue(criteria(input_ids , scores ) )
        criteria_list = StoppingCriteriaList([criteria] )
        self.assertEqual(criteria_list.max_length , 1_0 )
    def test_max_time_criteria( self ):
        """simple docstring"""
        input_ids , scores = self._get_tensors(5 )
        criteria = MaxTimeCriteria(max_time=0.1 )
        self.assertFalse(criteria(input_ids , scores ) )
        criteria = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
        self.assertTrue(criteria(input_ids , scores ) )
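
    # Hedged usage sketch (added for illustration; the model and inputs are
    # assumptions): outside of these unit tests, the same criteria objects plug
    # straight into text generation, e.g.
    #
    #     model.generate(**inputs, stopping_criteria=StoppingCriteriaList([MaxTimeCriteria(max_time=1.0)]))
    #
    # and `generate` stops as soon as any criterion in the list returns True.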
    def test_validate_stopping_criteria( self ):
        """simple docstring"""
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_0 )
        with self.assertWarns(UserWarning ):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(1_0 )] ) , 1_1 )
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList() , 1_1 )
        self.assertEqual(len(stopping_criteria ) , 1 ) | 112 |
'''simple docstring'''
def hamming(n_element: int ):
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError("""a should be a positive number""" )
        raise my_error
    hamming_list = [1]
    (i, j, k) = (0, 0, 0)
    index = 1
    while index < n_element:
        while hamming_list[i] * 2 <= hamming_list[-1]:
            i += 1
        while hamming_list[j] * 3 <= hamming_list[-1]:
            j += 1
        while hamming_list[k] * 5 <= hamming_list[-1]:
            k += 1
        hamming_list.append(
            min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
        index += 1
    return hamming_list
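
# A small, hedged sanity check (added for illustration): the Hamming sequence starts
# 1, 2, 3, 4, 5, 6, 8, ... (numbers with no prime factor other than 2, 3 and 5), so
#
#     assert hamming(5) == [1, 2, 3, 4, 5]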
if __name__ == "__main__":
    n = input('''Enter the last number (nth term) of the Hamming Number Series: ''')
    print('''Formula of Hamming Number Series => 2^i * 3^j * 5^k''')
    hamming_numbers = hamming(int(n))
    print('''-----------------------------------------------------''')
    print(f"The list with nth numbers is: {hamming_numbers}")
    print('''-----------------------------------------------------''') | 112 | 1 |
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=64 , embedding_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.0_2 , num_labels=3 , num_choices=4 , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.embedding_size = embedding_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        '''simple docstring'''
        return MobileBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_mobilebert_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MobileBertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_mobilebert_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MobileBertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_mobilebert_for_next_sequence_prediction( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MobileBertForNextSentencePrediction(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MobileBertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , next_sentence_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
    def create_and_check_mobilebert_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = MobileBertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_mobilebert_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileBertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_mobilebert_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = MobileBertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_mobilebert_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = MobileBertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        '''simple docstring'''
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
    def test_config( self ):
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_mobilebert_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs )
    def test_for_next_sequence_prediction( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs )
    def test_for_pretraining( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs )
    def test_for_question_answering( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs )
def _long_tensor(tok_lst ) -> List[Any]:
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
@slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(torch_device )
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [
                    [-2.4736526E07, 8.2691656E04, 1.6521838E05],
                    [-5.7541704E-01, 3.9056022E00, 4.4011507E00],
                    [2.6047359E00, 1.5677652E00, -1.7324188E-01],
                ]
            ] , device=torch_device , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
        # ~1 difference, it's therefore not a good idea to measure using addition.
        # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
        # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
        self.assertTrue(lower_bound and upper_bound ) | 369 |
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
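
# Hedged note (added for clarity): each class in this module is a standard "dummy
# object" -- instantiating it, or calling its `from_config` / `from_pretrained`
# classmethods, immediately raises an ImportError via `requires_backends`, telling
# the user to install torch, transformers and onnx before using the real class.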
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class A__ ( metaclass=DummyObject ):
    _backends = ['torch', 'transformers', 'onnx']
    def __init__( self , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(self , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        '''simple docstring'''
        requires_backends(cls , ['torch', 'transformers', 'onnx'] ) | 307 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
'''simple docstring'''
    dataset_name: Optional[str] = field(
        default='''tab_fact''' , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
    dataset_config_name: Optional[str] = field(
        default='''tab_fact''' , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} , )
    max_seq_length: int = field(
        default=1024 , metadata={
            '''help''': (
                '''The maximum total input sequence length after tokenization. Sequences longer '''
                '''than this will be truncated, sequences shorter will be padded.'''
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
    pad_to_max_length: bool = field(
        default=False , metadata={
            '''help''': (
                '''Whether to pad all samples to `max_seq_length`. '''
                '''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
            )
        } , )
    max_train_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of training examples to this '''
                '''value if set.'''
            )
        } , )
    max_eval_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
                '''value if set.'''
            )
        } , )
    max_predict_samples: Optional[int] = field(
        default=None , metadata={
            '''help''': (
                '''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
                '''value if set.'''
            )
        } , )
    train_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A csv or a json file containing the training data.'''} )
    validation_file: Optional[str] = field(
        default=None , metadata={'''help''': '''A csv or a json file containing the validation data.'''} )
    test_file: Optional[str] = field(default=None , metadata={'''help''': '''A csv or a json file containing the test data.'''} )
    def __post_init__( self ):
        """simple docstring"""
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
        else:
            train_extension = self.train_file.split(""".""" )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(""".""" )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments :
'''simple docstring'''
    model_name_or_path: str = field(
        default=None , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
    config_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
    cache_dir: Optional[str] = field(
        default=None , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
    use_fast_tokenizer: bool = field(
        default=True , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
    model_revision: str = field(
        default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
    use_auth_token: bool = field(
        default=False , metadata={
            '''help''': (
                '''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
                '''with private models).'''
            )
        } , )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args , data_args , training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        model_args , data_args , training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level )
    datasets.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.set_verbosity(log_level )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Log on each process the small summary:
    logger.warning(
        F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}" )
logger.info(F"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"Output directory ({training_args.output_dir}) already exists and is not empty. "
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
    else:
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
        data_files = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
        if training_args.do_predict:
            if data_args.test_file is not None:
                train_extension = data_args.train_file.split(""".""" )[-1]
                test_extension = data_args.test_file.split(""".""" )[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["""test"""] = data_args.test_file
            else:
                raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
        for key in data_files.keys():
            logger.info(F"load a local file for {key}: {data_files[key]}" )
        if data_args.train_file.endswith(""".csv""" ):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("""csv""" , data_files=data_files , cache_dir=model_args.cache_dir )
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("""json""" , data_files=data_files , cache_dir=model_args.cache_dir )
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
    # Labels
    label_list = raw_datasets["""train"""].features["""label"""].names
    num_labels = len(label_list )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=num_labels , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=True , )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = """max_length"""
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__lowercase : str = {"""Refused""": 0, """Entailed""": 1}
__lowercase : Tuple = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." )
__lowercase : Tuple = min(data_args.max_seq_length , tokenizer.model_max_length )
    def preprocess_tabfact_function(examples ):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text ):
            _table_content = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
            _table_pd = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
            return _table_pd
        questions = examples["""statement"""]
        tables = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
        result = tokenizer(tables , questions , padding=padding , max_length=max_seq_length , truncation=True )
        result["""label"""] = examples["""label"""]
        return result
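
    # Illustrative example (added; the concrete table string is hypothetical): a
    # table_text value such as "col1#col2\ncat#1\ndog#2" is split into rows on
    # newlines and into cells on '#', yielding a DataFrame with header col1/col2
    # and records ("cat", "1") and ("dog", "2") that TapexTokenizer can linearize.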
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function , batched=True , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
    if training_args.do_train:
        if "train" not in raw_datasets:
            raise ValueError("""--do_train requires a train dataset""" )
        train_dataset = raw_datasets["""train"""]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples ) )
    if training_args.do_eval:
        if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
            raise ValueError("""--do_eval requires a validation dataset""" )
        eval_dataset = raw_datasets["""validation"""]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples ) )
    if training_args.do_predict or data_args.test_file is not None:
        if "test" not in raw_datasets and "test_matched" not in raw_datasets:
            raise ValueError("""--do_predict requires a test dataset""" )
        predict_dataset = raw_datasets["""test"""]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples ) )
    # Log a few random samples from the training set:
    if training_args.do_train:
        for index in random.sample(range(len(train_dataset ) ) , 3 ):
            logger.info(F"Sample {index} of the training set: {train_dataset[index]}." )
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction ):
        preds = p.predictions[0] if isinstance(p.predictions , tuple ) else p.predictions
        preds = np.argmax(preds , axis=1 )
        return {"accuracy": (preds == p.label_ids).astype(np.float32 ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer , pad_to_multiple_of=8 )
    else:
        data_collator = None
    # Initialize our Trainer
    trainer = Trainer(
        model=model , args=training_args , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=compute_metrics , tokenizer=tokenizer , data_collator=data_collator , )
# Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint )
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset )
        )
        metrics["""train_samples"""] = min(max_train_samples , len(train_dataset ) )
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("""train""" , metrics )
        trainer.save_metrics("""train""" , metrics )
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset )
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset )
        metrics["""eval_samples"""] = min(max_eval_samples , len(eval_dataset ) )
        trainer.log_metrics("""eval""" , metrics )
        trainer.save_metrics("""eval""" , metrics )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("""label""" )
        predictions = trainer.predict(predict_dataset , metric_key_prefix="""predict""" ).predictions
        predictions = np.argmax(predictions , axis=1 )
        output_predict_file = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
        if trainer.is_world_process_zero():
            with open(output_predict_file , """w""" ) as writer:
                logger.info("""***** Predict Results *****""" )
                writer.write("""index\tprediction\n""" )
                for index, item in enumerate(predictions ):
                    item = label_list[item]
writer.write(F"{index}\t{item}\n" )
__lowercase : Union[str, Any] = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**lowerCAmelCase_ )
else:
trainer.create_model_card(**lowerCAmelCase_ )
def snake_case_ ( lowerCAmelCase_ : Any ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main() | 233 |
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester :
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , image_size=32 , num_channels=3 , num_stages=4 , hidden_sizes=[10, 20, 30, 40] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=37 , hidden_act="gelu" , num_labels=10 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , out_indices=[2, 3, 4] , scope=None , ) -> Optional[Any]:
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        return ConvNextVaConfig(
            num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=False , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
        self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps ) , 1 )
        self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
        # verify channels
        self.parent.assertEqual(len(model.channels ) , 1 )
        self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values, """labels""": labels}
        return config, inputs_dict
@require_torch
class ConvNextVaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_A : str = False
_A : Dict = False
_A : str = False
_A : str = False
_A : Dict = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvNextVaConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ):
        """simple docstring"""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ):
        """simple docstring"""
        return
@unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" )
def lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" )
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" )
def lowerCAmelCase ( self : Any ) -> Optional[int]:
"""simple docstring"""
pass
    def test_training( self ):
        """simple docstring"""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
            ]:
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_training_gradient_checkpointing( self ):
        """simple docstring"""
        if not self.model_tester.is_training:
            return
        for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def test_forward_signature( self ):
        """simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        """simple docstring"""
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained( self ):
        """simple docstring"""
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self ):
        """simple docstring"""
        return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None
@slow
    def test_inference_image_classification_head( self ):
        """simple docstring"""
        model = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) ) | 233 | 1 |
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
    TaTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 512,
"""t5-base""": 512,
"""t5-large""": 512,
"""t5-3b""": 512,
"""t5-11b""": 512,
}
class TaTokenizerFast ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    slow_tokenizer_class = TaTokenizer
    prefix_tokens = []
    def __init__( self , vocab_file=None , tokenizer_file=None , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , extra_ids=100 , additional_special_tokens=None , **kwargs , ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [F'''<extra_id_{i}>''' for i in range(extra_ids )]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool('extra_id_' in str(x ) ) , additional_special_tokens ) ) )
            if extra_tokens != extra_ids:
                raise ValueError(
                    F'''Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are'''
                    ' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'
                    ' tokens' )
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , extra_ids=extra_ids , additional_special_tokens=additional_special_tokens , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path , max_model_length , init_max_model_length ):
        if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    'This tokenizer was incorrectly instantiated with a model max length of'
                    F''' {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this'''
                    ' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'
                    ' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'
                    F''' {pretrained_model_name_or_path} automatically truncating your input to'''
                    F''' {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences'''
                    F''' longer than {deprecated_max_model_length} you can either instantiate this tokenizer with'''
                    ' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'
                    ' instantiate this tokenizer with `model_max_length` set to your preferred value.' , FutureWarning , )
        return max_model_length
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
                'tokenizer.' )
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
            logger.info(F'''Copy vocab file to {out_vocab_file}''' )
        return (out_vocab_file,)
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b = None ) -> List[int]:
        token_ids_a = token_ids_a + [self.eos_token_id]
        if token_ids_b is None:
            return self.prefix_tokens + token_ids_a
        else:
            token_ids_b = token_ids_b + [self.eos_token_id]
            return self.prefix_tokens + token_ids_a + token_ids_b
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_b is None:
            return len(token_ids_a + eos ) * [0]
        return len(token_ids_a + eos + token_ids_b + eos ) * [0]
    def get_sentinel_tokens( self ) -> Any:
        return list(
            set(filter(lambda token: bool(re.search(r'<extra_id_\d+>' , token ) ) is not None , self.additional_special_tokens ) ) )
    def get_sentinel_token_ids( self ) -> Dict:
        return [self.convert_tokens_to_ids(token ) for token in self.get_sentinel_tokens()]
| 295 |
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCamelCase_ ( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self , lowerCAmelCase_ = 128 , lowerCAmelCase_ = 256 , lowerCAmelCase_ = 20_00.0 , lowerCAmelCase_ = 768 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 12 , lowerCAmelCase_ = 64 , lowerCAmelCase_ = 2048 , lowerCAmelCase_ = 0.1 , ) -> Union[str, Any]:
super().__init__()
_snake_case = nn.Sequential(
nn.Linear(lowerCAmelCase_ , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=lowerCAmelCase_ ) , nn.SiLU() , )
_snake_case = nn.Embedding(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = False
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.ModuleList()
for lyr_num in range(lowerCAmelCase_ ):
# FiLM conditional T5 decoder
_snake_case = DecoderLayer(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
self.decoders.append(lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = nn.Dropout(p=lowerCAmelCase_ )
_snake_case = nn.Linear(lowerCAmelCase_ , lowerCAmelCase_ , bias=lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_snake_case = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[Any]:
_snake_case , _snake_case , _snake_case = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
_snake_case = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
_snake_case = self.conditioning_emb(lowerCAmelCase_ ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
_snake_case = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
_snake_case = torch.broadcast_to(
torch.arange(lowerCAmelCase_ , device=decoder_input_tokens.device ) , (batch, seq_length) , )
_snake_case = self.position_encoding(lowerCAmelCase_ )
_snake_case = self.continuous_inputs_projection(lowerCAmelCase_ )
inputs += position_encodings
_snake_case = self.dropout(lowerCAmelCase_ )
# decoder: No padding present.
_snake_case = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
_snake_case = [(x, self.encoder_decoder_mask(lowerCAmelCase_ , lowerCAmelCase_ )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
_snake_case = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
_snake_case = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
_snake_case = lyr(
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )[0]
_snake_case = self.decoder_norm(lowerCAmelCase_ )
_snake_case = self.post_dropout(lowerCAmelCase_ )
_snake_case = self.spec_out(lowerCAmelCase_ )
return spec_out
class DecoderLayer( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=1E-6 ) -> Tuple:
super().__init__()
_snake_case = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=lowerCAmelCase_ , d_kv=lowerCAmelCase_ , num_heads=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ , layer_norm_epsilon=lowerCAmelCase_ ) )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Tuple:
_snake_case = self.layer[0](
lowerCAmelCase_ , conditioning_emb=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
if encoder_hidden_states is not None:
_snake_case = torch.where(encoder_attention_mask > 0 , 0 , -1E10 ).to(
encoder_hidden_states.dtype )
_snake_case = self.layer[1](
lowerCAmelCase_ , key_value_states=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , )
# Apply Film Conditional Feed Forward layer
_snake_case = self.layer[-1](lowerCAmelCase_ , lowerCAmelCase_ )
return (hidden_states,)
class TaLayerSelfAttentionCond( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
super().__init__()
_snake_case = TaLayerNorm(lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> str:
# pre_self_attention_layer_norm
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.FiLMLayer(lowerCAmelCase_ , lowerCAmelCase_ )
# Self-attention block
_snake_case = self.attention(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class TaLayerCrossAttention( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
super().__init__()
_snake_case = Attention(query_dim=lowerCAmelCase_ , heads=lowerCAmelCase_ , dim_head=lowerCAmelCase_ , out_bias=lowerCAmelCase_ , scale_qk=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , ) -> Dict:
_snake_case = self.layer_norm(lowerCAmelCase_ )
_snake_case = self.attention(
lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , attention_mask=attention_mask.squeeze(1 ) , )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return layer_output
class TaLayerFFCond( nn.Module ):
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
super().__init__()
_snake_case = TaDenseGatedActDense(d_model=lowerCAmelCase_ , d_ff=lowerCAmelCase_ , dropout_rate=lowerCAmelCase_ )
_snake_case = TaFiLMLayer(in_features=d_model * 4 , out_features=lowerCAmelCase_ )
_snake_case = TaLayerNorm(lowerCAmelCase_ , eps=lowerCAmelCase_ )
_snake_case = nn.Dropout(lowerCAmelCase_ )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=None ) -> Union[str, Any]:
_snake_case = self.layer_norm(lowerCAmelCase_ )
if conditioning_emb is not None:
_snake_case = self.film(lowerCAmelCase_ , lowerCAmelCase_ )
_snake_case = self.DenseReluDense(lowerCAmelCase_ )
_snake_case = hidden_states + self.dropout(lowerCAmelCase_ )
return hidden_states
class TaDenseGatedActDense( nn.Module ):
    def __init__( self , d_model , d_ff , dropout_rate ) -> Union[str, Any]:
        super().__init__()
        self.wi_0 = nn.Linear(d_model , d_ff , bias=False )
        self.wi_1 = nn.Linear(d_model , d_ff , bias=False )
        self.wo = nn.Linear(d_ff , d_model , bias=False )
        self.dropout = nn.Dropout(dropout_rate )
        self.act = NewGELUActivation()
    def forward( self , hidden_states ) -> Any:
        hidden_gelu = self.act(self.wi_0(hidden_states ) )
        hidden_linear = self.wi_1(hidden_states )
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states )
        hidden_states = self.wo(hidden_states )
        return hidden_states
class TaLayerNorm( nn.Module ):
    def __init__( self , hidden_size , eps=1E-6 ) -> str:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size ) )
        self.variance_epsilon = eps
    def forward( self , hidden_states ) -> int:
        # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean
        # Square Layer Normalization https://arxiv.org/abs/1910.07467 thus variance is calculated
        # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for
        # half-precision inputs is done in fp32
        variance = hidden_states.to(torch.float32 ).pow(2 ).mean(-1 , keepdim=True )
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype )
        return self.weight * hidden_states
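# Reference check for the RMS normalization above: no mean subtraction and no bias,
# just x * rsqrt(mean(x**2) + eps) scaled by a learned weight (shapes are arbitrary).
if __name__ == "__main__":
    h = torch.randn(2, 3, 8)
    ref = h * torch.rsqrt(h.pow(2).mean(-1, keepdim=True) + 1e-6)
    print(ref.shape)  # torch.Size([2, 3, 8])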
class NewGELUActivation( nn.Module ):
    def forward( self , input ) -> torch.Tensor:
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.04_47_15 * torch.pow(input , 3.0 )) ))
class TaFiLMLayer( nn.Module ):
    def __init__( self , in_features , out_features ) -> Any:
        super().__init__()
        self.scale_bias = nn.Linear(in_features , out_features * 2 , bias=False )
    def forward( self , x , conditioning_emb ) -> Optional[Any]:
        emb = self.scale_bias(conditioning_emb )
        scale , shift = torch.chunk(emb , 2 , -1 )
        x = x * (1 + scale) + shift
        return x
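# Quick numerical check of the FiLM modulation used above, x * (1 + scale) + shift;
# the tensor shapes are arbitrary.
if __name__ == "__main__":
    x = torch.randn(2, 10, 4)
    scale, shift = torch.chunk(torch.randn(2, 1, 8), 2, -1)
    print((x * (1 + scale) + shift).shape)  # torch.Size([2, 10, 4])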
| 295 | 1 |
"""simple docstring"""
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open(*args , **kwargs ):
            '''simple docstring'''
            pass
def hashimage(image: Image ) -> str:
    """simple docstring"""
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline(self , model , tokenizer , processor ):
        '''simple docstring'''
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self , depth_estimator , examples ):
        '''simple docstring'''
        outputs = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , outputs )
import datasets
        dataset = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
        outputs = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
            ] , outputs , )
    @require_tf
    @unittest.skip('''Depth estimation is not implemented in TF''' )
    def test_small_model_tf(self ):
        '''simple docstring'''
        pass
    @slow
    @require_torch
    def test_large_model_pt(self ):
        '''simple docstring'''
        model_id = '''Intel/dpt-large'''
        depth_estimator = pipeline('''depth-estimation''' , model=model_id )
        outputs = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
        outputs['''depth'''] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
    @require_torch
    def test_small_model_pt(self ):
        '''simple docstring'''
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
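# Minimal end-to-end sketch of the pipeline exercised above (the model name and image
# URL mirror the slow test; running this needs network access and the timm/vision deps):
if __name__ == "__main__":
    from transformers import pipeline
    depth = pipeline("depth-estimation", model="Intel/dpt-large")
    out = depth("http://images.cocodataset.org/val2017/000000039769.jpg")
    print(out["predicted_depth"].shape, out["depth"].size)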
| 102 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tapas"] = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_tapas"] = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
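# Usage sketch: with the lazy module installed in sys.modules above, the heavy
# framework imports are deferred until first attribute access (assuming this file is
# installed as transformers/models/tapas/__init__.py):
if __name__ == "__main__":
    from transformers import TapasConfig  # resolved lazily on first access
    print(TapasConfig().hidden_size)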
| 238 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 361 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
'''sample_size''': 32,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [32, 64],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
IMAGENET_64_UNET_CONFIG = {
'''sample_size''': 64,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_000,
'''block_out_channels''': [192, 192 * 2, 192 * 3, 192 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
LSUN_256_UNET_CONFIG = {
'''sample_size''': 256,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'''attention_head_dim''': 64,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
CD_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 40,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 201,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
'''num_train_timesteps''': 151,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def strabool(v ) -> bool:
    if isinstance(v , bool ):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("""boolean value expected""" )
def convert_resnet(checkpoint , new_checkpoint , old_prefix , new_prefix , has_skip=False ) -> Any:
lowerCamelCase__ : Any = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
lowerCamelCase__ : int = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
lowerCamelCase__ : Any = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
lowerCamelCase__ : Any = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
lowerCamelCase__ : Optional[Any] = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
lowerCamelCase__ : Optional[int] = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
lowerCamelCase__ : Dict = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
lowerCamelCase__ : Tuple = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
lowerCamelCase__ : str = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
lowerCamelCase__ : int = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
if has_skip:
lowerCamelCase__ : Tuple = checkpoint[f'''{old_prefix}.skip_connection.weight''']
lowerCamelCase__ : List[Any] = checkpoint[f'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def convert_attention(checkpoint , new_checkpoint , old_prefix , new_prefix , attention_head_dim=None ) -> str:
    weight_q , weight_k , weight_v = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
    bias_q , bias_k , bias_v = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
lowerCamelCase__ : Any = checkpoint[f'''{old_prefix}.norm.weight''']
lowerCamelCase__ : Optional[int] = checkpoint[f'''{old_prefix}.norm.bias''']
lowerCamelCase__ : List[Any] = weight_q.squeeze(-1 ).squeeze(-1 )
lowerCamelCase__ : List[Any] = bias_q.squeeze(-1 ).squeeze(-1 )
lowerCamelCase__ : Any = weight_k.squeeze(-1 ).squeeze(-1 )
lowerCamelCase__ : Optional[Any] = bias_k.squeeze(-1 ).squeeze(-1 )
lowerCamelCase__ : Dict = weight_v.squeeze(-1 ).squeeze(-1 )
lowerCamelCase__ : Union[str, Any] = bias_v.squeeze(-1 ).squeeze(-1 )
lowerCamelCase__ : Optional[Any] = (
checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
lowerCamelCase__ : Dict = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
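# Shape check for the qkv split above: chunk(3, dim=0) slices the fused projection
# row-wise into equal query/key/value blocks (the sizes below are illustrative).
if __name__ == "__main__":
    qkv = torch.randn(12, 4, 1, 1)
    q, k, v = qkv.chunk(3, dim=0)
    print(q.squeeze(-1).squeeze(-1).shape)  # torch.Size([4, 4])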
def con_pt_to_diffuser(checkpoint_path , unet_config ) -> List[Any]:
    checkpoint = torch.load(checkpoint_path , map_location="""cpu""" )
    new_checkpoint = {}
lowerCamelCase__ : Optional[int] = checkpoint["""time_embed.0.weight"""]
lowerCamelCase__ : List[Any] = checkpoint["""time_embed.0.bias"""]
lowerCamelCase__ : int = checkpoint["""time_embed.2.weight"""]
lowerCamelCase__ : Optional[Any] = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
lowerCamelCase__ : Optional[Any] = checkpoint["""label_emb.weight"""]
lowerCamelCase__ : Tuple = checkpoint["""input_blocks.0.0.weight"""]
lowerCamelCase__ : List[str] = checkpoint["""input_blocks.0.0.bias"""]
lowerCamelCase__ : Optional[Any] = unet_config["""down_block_types"""]
lowerCamelCase__ : Any = unet_config["""layers_per_block"""]
lowerCamelCase__ : Any = unet_config["""attention_head_dim"""]
lowerCamelCase__ : List[Any] = unet_config["""block_out_channels"""]
lowerCamelCase__ : str = 1
lowerCamelCase__ : str = channels_list[0]
for i, layer_type in enumerate(UpperCamelCase ):
lowerCamelCase__ : List[Any] = channels_list[i]
lowerCamelCase__ : List[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(UpperCamelCase ):
lowerCamelCase__ : int = f'''down_blocks.{i}.resnets.{j}'''
lowerCamelCase__ : Dict = f'''input_blocks.{current_layer}.0'''
lowerCamelCase__ : Tuple = True if j == 0 and downsample_block_has_skip else False
lowerCamelCase__ : List[Any] = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , has_skip=UpperCamelCase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(UpperCamelCase ):
lowerCamelCase__ : Tuple = f'''down_blocks.{i}.resnets.{j}'''
lowerCamelCase__ : Optional[Any] = f'''input_blocks.{current_layer}.0'''
lowerCamelCase__ : str = True if j == 0 and downsample_block_has_skip else False
lowerCamelCase__ : Union[str, Any] = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , has_skip=UpperCamelCase )
lowerCamelCase__ : Any = f'''down_blocks.{i}.attentions.{j}'''
lowerCamelCase__ : Dict = f'''input_blocks.{current_layer}.1'''
lowerCamelCase__ : Tuple = convert_attention(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
current_layer += 1
if i != len(UpperCamelCase ) - 1:
lowerCamelCase__ : Tuple = f'''down_blocks.{i}.downsamplers.0'''
lowerCamelCase__ : str = f'''input_blocks.{current_layer}.0'''
lowerCamelCase__ : Union[str, Any] = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
current_layer += 1
lowerCamelCase__ : Union[str, Any] = current_channels
# hardcoded the mid-block for now
lowerCamelCase__ : Any = """mid_block.resnets.0"""
lowerCamelCase__ : Optional[Any] = """middle_block.0"""
lowerCamelCase__ : int = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : List[Any] = """mid_block.attentions.0"""
lowerCamelCase__ : Dict = """middle_block.1"""
lowerCamelCase__ : int = convert_attention(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Any = """mid_block.resnets.1"""
lowerCamelCase__ : Tuple = """middle_block.2"""
lowerCamelCase__ : int = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Union[str, Any] = 0
lowerCamelCase__ : Any = unet_config["""up_block_types"""]
for i, layer_type in enumerate(UpperCamelCase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
lowerCamelCase__ : int = f'''up_blocks.{i}.resnets.{j}'''
lowerCamelCase__ : Optional[Any] = f'''output_blocks.{current_layer}.0'''
lowerCamelCase__ : Any = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , has_skip=UpperCamelCase )
current_layer += 1
if i != len(UpperCamelCase ) - 1:
lowerCamelCase__ : Dict = f'''up_blocks.{i}.upsamplers.0'''
lowerCamelCase__ : List[str] = f'''output_blocks.{current_layer-1}.1'''
lowerCamelCase__ : Optional[Any] = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
lowerCamelCase__ : str = f'''up_blocks.{i}.resnets.{j}'''
lowerCamelCase__ : List[Any] = f'''output_blocks.{current_layer}.0'''
lowerCamelCase__ : Optional[Any] = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , has_skip=UpperCamelCase )
lowerCamelCase__ : Optional[Any] = f'''up_blocks.{i}.attentions.{j}'''
lowerCamelCase__ : Any = f'''output_blocks.{current_layer}.1'''
lowerCamelCase__ : Optional[int] = convert_attention(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
current_layer += 1
if i != len(UpperCamelCase ) - 1:
lowerCamelCase__ : Tuple = f'''up_blocks.{i}.upsamplers.0'''
lowerCamelCase__ : Tuple = f'''output_blocks.{current_layer-1}.2'''
lowerCamelCase__ : List[str] = convert_resnet(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : Dict = checkpoint["""out.0.weight"""]
lowerCamelCase__ : Dict = checkpoint["""out.0.bias"""]
lowerCamelCase__ : Dict = checkpoint["""out.2.weight"""]
lowerCamelCase__ : Tuple = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
    parser.add_argument(
        '''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
    )
    parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)
    ckpt_name = os.path.basename(args.unet_path)
    print(F'Checkpoint: {ckpt_name}')
    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
    if not args.class_cond:
        unet_config["num_class_embeds"] = None
    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)
    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(F'Checkpoint type {ckpt_name} is not currently supported.')
    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
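    # Optional sanity check (an assumption, not part of the original script): reload the
    # converted pipeline and draw one single-step sample.
    pipe = ConsistencyModelPipeline.from_pretrained(args.dump_path)
    print(pipe(num_inference_steps=1).images[0].size)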
| 129 | 0 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 98 |
"""simple docstring"""
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
a__ : Union[str, Any] = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    '''simple docstring'''
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS" , "{}" )
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options )
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS" , "{}" )
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options )
        if not mpi_options.get("sagemaker_mpi_enabled" , False ):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed" ) is not None
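# Quick illustration of the detection logic above: both environment variables must
# parse as JSON with the expected fields, and `smdistributed` must be importable.
if __name__ == "__main__":
    print("SM_HP_MP_PARAMETERS =", os.getenv("SM_HP_MP_PARAMETERS", "{}"))
    print("model parallel available:", is_sagemaker_model_parallel_available())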
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class UpperCamelCase_ ( TrainingArguments ):
    """simple docstring"""
    mp_parameters: str = field(
        default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , )
    def __post_init__( self ) -> Any:
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead." , FutureWarning , )
@cached_property
    def _setup_devices( self ) -> "torch.device":
        logger.info("PyTorch: setting up devices" )
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch" )
        if self.no_cuda:
            device = torch.device("cpu" )
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda" , local_rank )
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401
            torch.distributed.init_process_group(backend="smddp" , timeout=self.ddp_timeout_delta )
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK" ) )
            device = torch.device("cuda" , self.local_rank )
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl" , timeout=self.ddp_timeout_delta )
            device = torch.device("cuda" , self.local_rank )
            self._n_gpu = 1
        if device.type == "cuda":
            torch.cuda.set_device(device )
        return device
    @property
    def world_size( self ) -> Any:
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()
        return super().world_size
    @property
    def place_model_on_device( self ) -> List[Any]:
        return not is_sagemaker_model_parallel_available()
    @property
    def _no_sync_in_gradient_accumulation( self ) -> int:
        return False
| 54 | 0 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class __snake_case ( nn.Module ):
"""simple docstring"""
def __init__( self : List[Any] ):
"""simple docstring"""
super().__init__()
        self.linear1 = nn.Linear(3 , 4 )
        self.batchnorm = nn.BatchNorm1d(4 )
        self.linear2 = nn.Linear(4 , 5 )
    def forward( self , x ):
        """simple docstring"""
        return self.linear2(self.batchnorm(self.linear1(x ) ) )
class __snake_case ( unittest.TestCase ):
"""simple docstring"""
    def test_offload_state_dict( self ):
        """simple docstring"""
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , model.state_dict() )
            index_file = os.path.join(tmp_dir , """index.json""" )
            self.assertTrue(os.path.isfile(index_file ) )
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir , f"""{key}.dat""" )
                self.assertTrue(os.path.isfile(weight_file ) )
                # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight( self ):
        """simple docstring"""
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2 , 3 , dtype=dtype )
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight , """weight""" , tmp_dir , {} )
                weight_file = os.path.join(tmp_dir , """weight.dat""" )
                self.assertTrue(os.path.isfile(weight_file ) )
                self.assertDictEqual(index , {"""weight""": {"""shape""": [2, 3], """dtype""": str(dtype ).split(""".""" )[1]}} )
                new_weight = load_offloaded_weight(weight_file , index["""weight"""] )
                self.assertTrue(torch.equal(weight , new_weight ) )
    def test_offload_weights_loader( self ):
        """simple docstring"""
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if """linear2""" not in k}
        disk_part = {k: v for k, v in state_dict.items() if """linear2""" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        cpu_part = {k: v for k, v in state_dict.items() if """weight""" in k}
        disk_part = {k: v for k, v in state_dict.items() if """weight""" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , disk_part )
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir , state_dict )
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part , save_folder=tmp_dir )
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map ) , sorted(state_dict.keys() ) )
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param , weight_map[key] ) )
    def test_extract_submodules_state_dict( self ):
        """simple docstring"""
        state_dict = {"""a.1""": 0, """a.10""": 1, """a.2""": 2}
        extracted = extract_submodules_state_dict(state_dict , ["""a.1""", """a.2"""] )
        self.assertDictEqual(extracted , {"""a.1""": 0, """a.2""": 2} )
        state_dict = {"""a.1.a""": 0, """a.10.a""": 1, """a.2.a""": 2}
        extracted = extract_submodules_state_dict(state_dict , ["""a.1""", """a.2"""] )
        self.assertDictEqual(extracted , {"""a.1.a""": 0, """a.2.a""": 2} )
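# Minimal offload round-trip outside the unittest harness (a sketch; the loader lazily
# memory-maps the .dat files written by offload_state_dict):
if __name__ == "__main__":
    model = ModelForTest()
    with TemporaryDirectory() as tmp_dir:
        offload_state_dict(tmp_dir, model.state_dict())
        loader = OffloadedWeightsLoader(save_folder=tmp_dir)
        print(sorted(loader))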
| 357 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 204 | 0 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _snake_case ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ) -> List[Any]:
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
    @require_tpu
    def test_tpu( self ) -> Optional[int]:
        distributed_args = F'''
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        '''.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd , env=os.environ.copy() )
| 345 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols("""ct x y z""")
def beta(velocity ) -> float:
    """simple docstring"""
    if velocity > c:
        raise ValueError('Speed must not exceed light speed 299,792,458 [m/s]!' )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('Speed must be greater than or equal to 1!' )
    return velocity / c
def gamma(velocity ) -> float:
    """simple docstring"""
    return 1 / sqrt(1 - beta(velocity ) ** 2 )
def transformation_matrix(velocity ) -> np.ndarray:
    """simple docstring"""
    return np.array(
        [
            [gamma(velocity ), -gamma(velocity ) * beta(velocity ), 0, 0],
            [-gamma(velocity ) * beta(velocity ), gamma(velocity ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform(velocity , event = None ) -> np.ndarray:
    """simple docstring"""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity ) @ event
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("""Example of four vector: """)
    print(f'''ct\' = {four_vector[0]}''')
    print(f'''x\' = {four_vector[1]}''')
    print(f'''y\' = {four_vector[2]}''')
    print(f'''z\' = {four_vector[3]}''')
    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f'''\n{numerical_vector}''')
| 307 | 0 |
def solution(limit=28123 ) -> int:
    """simple docstring"""
    sum_divs = [1] * (limit + 1)
    for i in range(2 , int(limit**0.5 ) + 1 ):
        sum_divs[i * i] += i
        for k in range(i + 1 , limit // i + 1 ):
            sum_divs[k * i] += k + i
    abundants = set()
    res = 0
    for n in range(1 , limit + 1 ):
        if sum_divs[n] > n:
            abundants.add(n )
        if not any((n - a in abundants) for a in abundants ):
            res += n
    return res
if __name__ == "__main__":
    print(solution())
| 362 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_vit_msn"""] = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 267 | 0 |
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k ) -> Tuple:
    '''simple docstring'''
    return getitem, k
def _set(k , v ) -> int:
    '''simple docstring'''
    return setitem, k, v
def _del(k ) -> List[str]:
    '''simple docstring'''
    return delitem, k
def _run_operation(obj , fun , *args ) -> Tuple:
    '''simple docstring'''
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
lowerCAmelCase = (
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
)
lowerCAmelCase = [
_set('''key_a''', '''val_a'''),
_set('''key_a''', '''val_b'''),
]
lowerCAmelCase = [
_set('''key_a''', '''val_a'''),
_set('''key_b''', '''val_b'''),
_del('''key_a'''),
_del('''key_b'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
]
lowerCAmelCase = [
_get('''key_a'''),
_del('''key_a'''),
_set('''key_a''', '''val_a'''),
_del('''key_a'''),
_del('''key_a'''),
_get('''key_a'''),
]
lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCAmelCase = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('''key_a''', '''val_b'''),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def test_hash_map_is_the_same_as_dict(operations ) -> List[Any]:
    '''simple docstring'''
    my = HashMap(initial_block_size=4 )
    py = {}
    for _, (fun, *args) in enumerate(operations ):
        my_res, my_exc = _run_operation(my , fun , *args )
        py_res, py_exc = _run_operation(py , fun , *args )
        assert my_res == py_res
        assert str(my ) == str(py )
        assert set(my ) == set(py )
        assert len(my ) == len(py )
        assert set(my.items() ) == set(py.items() )
def test_no_new_methods_match_dict() -> str:
    '''simple docstring'''
    def is_public(name ) -> bool:
        return not name.startswith('_' )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    assert dict_public_names > hash_public_names
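# Direct sanity run without pytest (exercises one parametrized case by hand):
if __name__ == "__main__":
    test_hash_map_is_the_same_as_dict(_add_items)
    test_no_new_methods_match_dict()
    print("ok")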
| 295 |
import os
import numpy
import onnx
def _is_equal_tensor_proto(a , b ) -> Union[str, Any]:
    '''simple docstring'''
    name_a = a.name
    name_b = b.name
    a.name = ''
    b.name = ''
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto , name , new_name ) -> Union[str, Any]:
    '''simple docstring'''
    for i, input_name in enumerate(node_proto.input ):
        if input_name == name:
            node_proto.input.insert(i , new_name )
            node_proto.input.pop(i + 1 )
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
        _graph_replace_input_with(node_proto.attribute[1].g , name , new_name )
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g , name , new_name )
def _graph_replace_input_with(graph_proto , name , new_name ) -> str:
    '''simple docstring'''
    for n in graph_proto.node:
        _node_replace_input_with(n , name , new_name )
def _remove_dup_initializers_from_model(model , model_without_ext , ind_to_replace ) -> Any:
    '''simple docstring'''
    inits_with_data = list(model.graph.initializer )
    inits = list(model_without_ext.graph.initializer )
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i] )
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph , name_i , name_ref )
def remove_dup_initializers(onnx_file_path ) -> Dict:
    '''simple docstring'''
    model_file_folder = os.path.dirname(onnx_file_path )
    model_file_name = os.path.basename(onnx_file_path )
    model = onnx.load(os.path.join(model_file_folder , model_file_name ) )
    inits = list(model.graph.initializer )
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits ) ):
        if i in dup_set:
            continue
        for j in range(i + 1 , len(inits ) ):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i] , inits[j] ):
                dup_set.add(i )
                dup_set.add(j )
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims )
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 1_1:
                    mem_size *= 8
                else:
                    print('unexpected data type: ' , dtype )
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j )
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i) )
    print('total reduced size: ' , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , 'GB' )
    ind_to_replace = sorted(ind_to_replace )
    _remove_dup_initializers_from_model(model , model , ind_to_replace )
    new_model = 'optimized_' + model_file_name
    new_model = os.path.join(model_file_folder , new_model )
    onnx.save(model , new_model )
    return new_model
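# Usage sketch (the path is illustrative; writes `optimized_<name>` next to the input
# and returns the new file path):
if __name__ == "__main__":
    print(remove_dup_initializers("model.onnx"))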
| 295 | 1 |
'''simple docstring'''
UNIT_SYMBOL = {
"meter": "m",
"kilometer": "km",
"megametre": "Mm",
"gigametre": "Gm",
"terametre": "Tm",
"petametre": "Pm",
"exametre": "Em",
"zettametre": "Zm",
"yottametre": "Ym",
}
# Exponent of the factor(meter)
METRIC_CONVERSION = {
"m": 0,
"km": 3,
"Mm": 6,
"Gm": 9,
"Tm": 1_2,
"Pm": 1_5,
"Em": 1_8,
"Zm": 2_1,
"Ym": 2_4,
}
def length_conversion(value , from_type , to_type ) -> float:
    '''simple docstring'''
    from_sanitized = from_type.lower().strip('''s''' )
    to_sanitized = to_type.lower().strip('''s''' )
    from_sanitized = UNIT_SYMBOL.get(from_sanitized , from_sanitized )
    to_sanitized = UNIT_SYMBOL.get(to_sanitized , to_sanitized )
    if from_sanitized not in METRIC_CONVERSION:
        msg = (
            F'Invalid \'from_type\' value: {from_type!r}.\n'
            F'Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'
        )
        raise ValueError(msg )
    if to_sanitized not in METRIC_CONVERSION:
        msg = (
            F'Invalid \'to_type\' value: {to_type!r}.\n'
            F'Conversion abbreviations are: {", ".join(METRIC_CONVERSION )}'
        )
        raise ValueError(msg )
    from_exponent = METRIC_CONVERSION[from_sanitized]
    to_exponent = METRIC_CONVERSION[to_sanitized]
    exponent = 1
    if from_exponent > to_exponent:
        exponent = from_exponent - to_exponent
    else:
        exponent = -(to_exponent - from_exponent)
    return value * pow(10 , exponent )
if __name__ == "__main__":
from doctest import testmod
testmod()
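# Worked example for the converter above: 1 kilometer -> 10**(3 - 0) meters.
if __name__ == "__main__":
    print(length_conversion(1 , """kilometer""" , """meter""" ))  # 1000.0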
| 83 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case__ ( ProcessorMixin ):
"""simple docstring"""
lowerCamelCase = ["""image_processor""", """tokenizer"""]
lowerCamelCase = """CLIPImageProcessor"""
lowerCamelCase = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> Optional[Any]:
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ) -> Any:
        """simple docstring"""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding['''pixel_values'''] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ) -> Any:
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ) -> List[Any]:
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ) -> str:
        """simple docstring"""
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
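# Usage sketch (an assumption: this processor mirrors transformers' AltCLIPProcessor,
# which pairs a CLIP image processor with an XLM-R tokenizer; checkpoint illustrative):
if __name__ == "__main__":
    from transformers import AltCLIPProcessor
    processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP")
    batch = processor(text=["a photo of a cat"], return_tensors="pt")
    print(batch.input_ids.shape)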
| 83 | 1 |
import random
from .binary_exp_mod import bin_exp_mod
def is_prime_big(n , prec=10_00 ) -> bool:
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    # this means n is odd
    d = n - 1
    exp = 0
    while d % 2 == 0:
        d /= 2
        exp += 1
    # n - 1 = d * (2**exp)
    count = 0
    while count < prec:
        a = random.randint(2 , n - 1 )
        b = bin_exp_mod(a , d , n )
        if b != 1:
            flag = True
            for _ in range(exp ):
                if b == n - 1:
                    flag = False
                    break
                b = b * b
                b %= n
            if flag:
                return False
        count += 1
    return True
if __name__ == "__main__":
    n = abs(int(input('''Enter bound : ''').strip()))
    print('''Here\'s the list of primes:''')
    print(''', '''.join(str(i) for i in range(n + 1) if is_prime_big(i)))
| 196 |
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
__snake_case : Dict =logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotAudioClassificationPipeline(Pipeline):
    '''simple docstring'''
def __init__(self ,**__lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**__lowerCamelCase )
if self.framework != "pt":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""" )
# No specific FOR_XXX available yet
    def __call__(self , audios , **kwargs ) -> List[Any]:
        """simple docstring"""
        return super().__call__(audios , **kwargs )
    def _sanitize_parameters(self , **kwargs ) -> Any:
        """simple docstring"""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['''candidate_labels'''] = kwargs['''candidate_labels''']
        if "hypothesis_template" in kwargs:
            preprocess_params['''hypothesis_template'''] = kwargs['''hypothesis_template''']
        return preprocess_params, {}, {}
    def preprocess(self , audio , candidate_labels=None , hypothesis_template="This is a sound of {}." ) -> str:
        """simple docstring"""
        if isinstance(audio , str ):
            if audio.startswith('''http://''' ) or audio.startswith('''https://''' ):
                # We need to actually check for a real protocol, otherwise it's impossible to use a local file
                # like http_huggingface_co.png
                audio = requests.get(audio ).content
            else:
                with open(audio , '''rb''' ) as f:
                    audio = f.read()
        if isinstance(audio , bytes ):
            audio = ffmpeg_read(audio , self.feature_extractor.sampling_rate )
        if not isinstance(audio , np.ndarray ):
            raise ValueError('''We expect a numpy ndarray as input''' )
        if len(audio.shape ) != 1:
            raise ValueError('''We expect a single channel audio input for ZeroShotAudioClassificationPipeline''' )
        inputs = self.feature_extractor(
            [audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='''pt''' )
        inputs['''candidate_labels'''] = candidate_labels
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['''text_inputs'''] = [text_inputs]
        return inputs
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = model_inputs.pop('''candidate_labels''' )
lowerCAmelCase__ : List[str] = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] ,__lowerCamelCase ):
lowerCAmelCase__ : List[str] = text_inputs[0]
else:
# Batching case.
lowerCAmelCase__ : List[str] = text_inputs[0][0]
lowerCAmelCase__ : Union[str, Any] = self.model(**__lowerCamelCase ,**__lowerCamelCase )
lowerCAmelCase__ : Any = {
'''candidate_labels''': candidate_labels,
'''logits''': outputs.logits_per_audio,
}
return model_outputs
def lowerCAmelCase__ (self ,__lowerCamelCase ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = model_outputs.pop('''candidate_labels''' )
lowerCAmelCase__ : Optional[Any] = model_outputs['''logits'''][0]
if self.framework == "pt":
lowerCAmelCase__ : str = logits.softmax(dim=0 )
lowerCAmelCase__ : Dict = probs.tolist()
else:
raise ValueError('''`tf` framework not supported.''' )
lowerCAmelCase__ : Any = [
{'''score''': score, '''label''': candidate_label}
for score, candidate_label in sorted(zip(__lowerCamelCase ,__lowerCamelCase ) ,key=lambda __lowerCamelCase : -x[0] )
]
return result
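

# Illustrative usage sketch (the checkpoint id and file name are assumptions, not
# part of this module):
#
#   from transformers import pipeline
#
#   classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
#   classifier("audio.wav", candidate_labels=["dog barking", "vacuum cleaner"])
#   # -> [{"score": ..., "label": "dog barking"}, {"score": ..., "label": "vacuum cleaner"}]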
| 129 | 0 |
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        'HTSAT-tiny',
        'roberta',
        checkpoint_path,
        precision='fp32',
        device='cuda:0' if torch.cuda.is_available() else 'cpu',
        enable_fusion=enable_fusion,
        fusion_type='aff_2d' if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r'.*sequential.(\d+).*'
    text_projection_pattern = r'.*_projection.(\d+).*'

    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f'sequential.{sequential_layer}.', f'layers.{int(sequential_layer)//3}.linear.')
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f'_projection.{projecton_layer}.', f'_projection.linear{transformers_projection_layer}.')

        if 'audio' in key and 'qkv' in key:
            # split the fused qkv tensor into separate query, key and value tensors
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3

            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]

            model_state_dict[key.replace('qkv', 'query')] = query_layer
            model_state_dict[key.replace('qkv', 'key')] = key_layer
            model_state_dict[key.replace('qkv', 'value')] = value_layer
        else:
            model_state_dict[key] = value

    return model_state_dict
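

# Illustrative key mapping (the checkpoint key below is hypothetical, chosen to show
# the rules above): "audio_branch.encoder.sequential.3.weight" first goes through
# KEYS_TO_MODIFY_MAPPING ("audio_branch" -> "audio_model.audio_encoder"), then the
# sequential rule (int(3) // 3 == 1), ending up as
# "audio_model.audio_encoder.encoder.layers.1.linear.weight".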
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)

    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)

    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)

    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)

    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
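

# Illustrative CLI invocation (script and file names are placeholders):
#
#   python convert_clap_original_pytorch_to_hf.py \
#       --checkpoint_path clap.pt --pytorch_dump_folder_path ./clap-hf --enable_fusion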
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''')
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 361 |
'''simple docstring'''
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)

    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
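

def _example() -> None:
    # Worked example (values are illustrative, not from the original file):
    # items 0 and 1 fit whole; 20 of item 2's weight 30 fits, adding 120 * 20 / 30 = 80.
    max_value, fractions = fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    assert max_value == 240.0
    assert fractions == [1, 1, 20 / 30]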
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299 | 0 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    """simple docstring"""
    if not path:
        return 'pipe'

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f'Unable to determine file format from file extension {path}. '
        f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}'
    )


def run_command_factory(args):
    """simple docstring"""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == 'infer' else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
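

# Illustrative invocation (file names are placeholders; flags mirror register_subcommand below):
#
#   transformers-cli run --task text-classification --input data.csv --output out.json --column text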
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser('run', help='Run a pipeline through the CLI')
        run_parser.add_argument('--task', choices=get_supported_tasks(), help='Task to run')
        run_parser.add_argument('--input', type=str, help='Path to the file to use for inference')
        run_parser.add_argument('--output', type=str, help='Path to the file that will be used post to write results.')
        run_parser.add_argument('--model', type=str, help='Name or path to the model to instantiate.')
        run_parser.add_argument('--config', type=str, help='Name or path to the model\'s config to instantiate.')
        run_parser.add_argument(
            '--tokenizer', type=str, help='Name of the tokenizer to use. (default: same as the model name)')
        run_parser.add_argument(
            '--column', type=str, help='Name of the column to use as input. (For multi columns input as QA use column1,columns2)', )
        run_parser.add_argument(
            '--format', type=str, default='infer', choices=PipelineDataFormat.SUPPORTED_FORMATS, help='Input format to read from', )
        run_parser.add_argument(
            '--device', type=int, default=-1, help='Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)', )
        run_parser.add_argument('--overwrite', action='store_true', help='Allow overwriting the output file.')
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 32 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    '''simple docstring'''

    def __init__(self, parent):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = 'last'
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, bos_token_id=self.bos_token_id, )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        """simple docstring"""
        model = TFFlaubertModel(config=config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        """simple docstring"""
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths, 'langs': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        """simple docstring"""
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        """simple docstring"""
        model = TFFlaubertForSequenceClassification(config)
        inputs = {'input_ids': input_ids, 'lengths': input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask
    ):
        """simple docstring"""
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'token_type_ids': token_type_ids,
            'langs': token_type_ids,
            'lengths': input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            'feature-extraction': TFFlaubertModel,
            'fill-mask': TFFlaubertWithLMHeadModel,
            'question-answering': TFFlaubertForQuestionAnsweringSimple,
            'text-classification': TFFlaubertForSequenceClassification,
            'token-classification': TFFlaubertForTokenClassification,
            'zero-shot': TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """simple docstring"""
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False

    def setUp(self):
        """simple docstring"""
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_output_embeds_base_model(self):
        """simple docstring"""
        model = TFFlaubertModel.from_pretrained('jplu/tf-flaubert-small-cased')

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 204 | 0 |
def text_justification(word: str, max_width: int) -> list:
    words = word.split()

    def justify(line: list, width: int, max_width: int) -> str:
        overall_spaces_count = max_width - width
        words_count = len(line)
        if len(line) == 1:
            # if there is only one word in line
            # just insert overall_spaces_count for the remainder of line
            return line[0] + " " * overall_spaces_count
        else:
            spaces_to_insert_between_words = words_count - 1
            # num_spaces_between_words_list[i] : tells you to insert
            # num_spaces_between_words_list[i] spaces
            # after word on line[i]
            num_spaces_between_words_list = spaces_to_insert_between_words * [
                overall_spaces_count // spaces_to_insert_between_words
            ]
            spaces_count_in_locations = (
                overall_spaces_count % spaces_to_insert_between_words
            )
            # distribute spaces via round robin to the left words
            for i in range(spaces_count_in_locations):
                num_spaces_between_words_list[i] += 1
            aligned_words_list = []
            for i in range(spaces_to_insert_between_words):
                # add the word
                aligned_words_list.append(line[i])
                # add the spaces to insert
                aligned_words_list.append(num_spaces_between_words_list[i] * ' ')
            # just add the last word to the sentence
            aligned_words_list.append(line[-1])
            # join the aligned words list to form a justified line
            return "".join(aligned_words_list)

    answer = []
    line: list[str] = []
    width = 0
    for inner_word in words:
        if width + len(inner_word) + len(line) <= max_width:
            # keep adding words until we can fill out max_width
            # width = sum of length of all words (without overall_spaces_count)
            # len(inner_word) = length of current word
            # len(line) = number of overall_spaces_count to insert between words
            line.append(inner_word)
            width += len(inner_word)
        else:
            # justify the line and add it to result
            answer.append(justify(line, width, max_width))
            # reset new line and new width
            line, width = [inner_word], len(inner_word)
    remaining_spaces = max_width - width - len(line)
    answer.append(' '.join(line) + (remaining_spaces + 1) * ' ')
    return answer
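

# Illustrative call (the sentence is an assumption, not from the original file):
#   text_justification("This is an example of text justification.", 16)
#   -> ['This    is    an', 'example  of text', 'justification.  ']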
if __name__ == "__main__":
from doctest import testmod
testmod() | 361 |
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
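

# Illustrative output (n=4, k=2):
#   generate_all_combinations(4, 2)
#   -> [[1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]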
if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
'''python''',
'''tqdm''',
'''regex''',
'''requests''',
'''packaging''',
'''filelock''',
'''numpy''',
'''tokenizers''',
'''huggingface-hub''',
'''safetensors''',
'''accelerate''',
'''pyyaml''',
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    """simple docstring"""
    require_version(deps[pkg], hint)
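

# Example (illustrative): fail fast at import time if an incompatible version of a
# single dependency is installed.
#   dep_version_check("tokenizers")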
| 5 |
'''simple docstring'''
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    """simple docstring"""
    if model_type not in MODEL_CLASSES:
        raise ValueError(f'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.')

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f'Building TensorFlow model from configuration: {config}')
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location='cpu')
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f'Max absolute difference between models outputs {diff}')
        assert diff <= 2E-2, f'Error, model absolute difference is >2e-2: {diff}'

    # Save pytorch-model
    print(f'Save TensorFlow model to {tf_dump_path}')
    tf_model.save_weights(tf_dump_path, save_format='h5')


def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    """simple docstring"""
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print('=' * 1_00)
        print(f' Converting model type {j}/{len(model_types)}: {model_type}')
        print('=' * 1_00)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.')

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print('-' * 1_00)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f'    Skipping finetuned checkpoint {model_shortcut_name}')
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f'    Skipping not finetuned checkpoint {model_shortcut_name}')
                continue
            print(
                f'    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}')
            print('-' * 1_00)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = 'converted_model'

            convert_pt_checkpoint_to_tf(
                model_type=model_type, pytorch_checkpoint_path=model_file, config_file=config_file, tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + '-tf_model.h5'), compare_with_pt_model=compare_with_pt_model, )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 267 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class ConstraintTest(unittest.TestCase):
    """simple docstring"""

    def test_input_types(self):
        """simple docstring"""
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        """simple docstring"""
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 353 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    """simple docstring"""

    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        """simple docstring"""
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        """simple docstring"""
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        """simple docstring"""
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        """simple docstring"""
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        """simple docstring"""
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_no_noise(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3

    def test_full_loop_device(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3

    def test_full_loop_device_karras_sigmas(self):
        """simple docstring"""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)

            model_output = model(sample, t)

            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
| 139 | 0 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
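
    # Illustrative CLI call (script and file names are placeholders):
    #   python convert_original_stable_diffusion_to_diffusers.py \
    #       --checkpoint_path sd.ckpt --dump_path ./sd-diffusers --half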
| 83 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:  # unreachable: any multiple of 15 already matched the branch above
            result -= a
        a += 1
    return result
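

# Worked check (illustrative): for n = 10 the loop visits a = 3..9 and sums the
# multiples of 3 or 5 -> 3 + 5 + 6 + 9 = 23, the classic Project Euler example.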
if __name__ == "__main__":
print(F"""{solution() = }""")
| 83 | 1 |
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.txt""",
"""merges_file""": """bpe.codes""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""",
},
"""merges_file""": {
"""vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""",
"""vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""vinai/phobert-base""": 256,
"""vinai/phobert-large""": 256,
}
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
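

# Illustrative call (the '</w>' end-of-word handling happens later, in bpe()):
#   get_pairs(("l", "o", "w")) -> {("l", "o"), ("o", "w")}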
class PhobertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[bos_token] = 0
        self.encoder[pad_token] = 1
        self.encoder[eos_token] = 2
        self.encoder[unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding='utf-8') as merges_handle:
            merges = merges_handle.read().split('\n')[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + '</w>'])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '@@ '.join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []

        words = re.findall(r'\S+\n?', text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = ' '.join(tokens).replace('@@ ', '').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        if isinstance(f, str):
            try:
                with open(f, 'r', encoding='utf-8') as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
for lineTmp in lines:
__lowerCamelCase : Union[str, Any] = lineTmp.strip()
__lowerCamelCase : Optional[Any] = line.rfind(' ')
if idx == -1:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'')
__lowerCamelCase : Union[str, Any] = line[:idx]
__lowerCamelCase : Tuple = len(self.encoder)
| 113 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
a ="""src/transformers"""
a ="""docs/source/en"""
a ="""."""
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[int]:
with open(lowerCamelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowerCamelCase : Any = f.readlines()
# Find the start prompt.
__lowerCamelCase : List[str] = 0
while not lines[start_index].startswith(lowerCamelCase__ ):
start_index += 1
start_index += 1
__lowerCamelCase : int = start_index
while not lines[end_index].startswith(lowerCamelCase__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
a ="""Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
a =re.compile(r"""TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
a =re.compile(r"""Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# Will match any TF or Flax model too, so it needs to be in an else branch after the two previous regexes.
a =re.compile(r"""(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)""")
# This is to make sure the transformers module imported is the one in the repo.
a =direct_transformers_import(TRANSFORMERS_PATH)
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> List[str]:
__lowerCamelCase : int = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , lowerCamelCase__ )
return [m.group(0 ) for m in matches]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
    __lowerCamelCase : int = 2 if text == '✅' or text == '❌' else len(lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = (width - text_length) // 2
__lowerCamelCase : List[Any] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def SCREAMING_SNAKE_CASE__ ( ) -> str:
__lowerCamelCase : Union[str, Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
__lowerCamelCase : List[str] = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
__lowerCamelCase : Dict = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
__lowerCamelCase : Union[str, Any] = collections.defaultdict(lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = collections.defaultdict(lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = collections.defaultdict(lowerCamelCase__ )
__lowerCamelCase : List[str] = collections.defaultdict(lowerCamelCase__ )
__lowerCamelCase : Union[str, Any] = collections.defaultdict(lowerCamelCase__ )
# Let's lookup through all transformers object (once).
for attr_name in dir(lowerCamelCase__ ):
__lowerCamelCase : List[Any] = None
if attr_name.endswith('Tokenizer' ):
__lowerCamelCase : Dict = slow_tokenizers
__lowerCamelCase : List[Any] = attr_name[:-9]
elif attr_name.endswith('TokenizerFast' ):
__lowerCamelCase : Union[str, Any] = fast_tokenizers
__lowerCamelCase : str = attr_name[:-1_3]
elif _re_tf_models.match(lowerCamelCase__ ) is not None:
__lowerCamelCase : List[str] = tf_models
__lowerCamelCase : Optional[int] = _re_tf_models.match(lowerCamelCase__ ).groups()[0]
elif _re_flax_models.match(lowerCamelCase__ ) is not None:
__lowerCamelCase : List[Any] = flax_models
__lowerCamelCase : Optional[Any] = _re_flax_models.match(lowerCamelCase__ ).groups()[0]
elif _re_pt_models.match(lowerCamelCase__ ) is not None:
__lowerCamelCase : Optional[int] = pt_models
__lowerCamelCase : Any = _re_pt_models.match(lowerCamelCase__ ).groups()[0]
if lookup_dict is not None:
while len(lowerCamelCase__ ) > 0:
if attr_name in model_name_to_prefix.values():
__lowerCamelCase : List[Any] = True
break
# Try again after removing the last word in the name
__lowerCamelCase : str = ''.join(camel_case_split(lowerCamelCase__ )[:-1] )
# Let's build that table!
__lowerCamelCase : str = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
__lowerCamelCase : Union[str, Any] = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
__lowerCamelCase : List[Any] = [len(lowerCamelCase__ ) + 2 for c in columns]
__lowerCamelCase : int = max([len(lowerCamelCase__ ) for name in model_names] ) + 2
# Build the table per se
__lowerCamelCase : Union[str, Any] = '|' + '|'.join([_center_text(lowerCamelCase__ , lowerCamelCase__ ) for c, w in zip(lowerCamelCase__ , lowerCamelCase__ )] ) + '|\n'
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
    __lowerCamelCase : List[str] = {True: '✅', False: '❌'}
for name in model_names:
__lowerCamelCase : Optional[int] = model_name_to_prefix[name]
__lowerCamelCase : Any = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(lowerCamelCase__ , lowerCamelCase__ ) for l, w in zip(lowerCamelCase__ , lowerCamelCase__ )] ) + "|\n"
return table
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__=False ) -> Any:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[str] = _find_text_in_file(
filename=os.path.join(lowerCamelCase__ , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
__lowerCamelCase : List[Any] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(lowerCamelCase__ , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
a =argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
a =parser.parse_args()
check_model_table(args.fix_and_overwrite)
| 113 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase__ = logging.get_logger(__name__)
def _a ( SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[str]=False ):
__lowerCAmelCase = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append((F"""blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("cls_token", "vit.embeddings.cls_token"),
("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
__lowerCAmelCase = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def _a ( SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
__lowerCAmelCase = ""
else:
__lowerCAmelCase = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.weight""" )
__lowerCAmelCase = state_dict.pop(F"""blocks.{i}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
__lowerCAmelCase = in_proj_weight[
: config.hidden_size, :
]
__lowerCAmelCase = in_proj_bias[: config.hidden_size]
__lowerCAmelCase = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__lowerCAmelCase = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__lowerCAmelCase = in_proj_weight[
-config.hidden_size :, :
]
__lowerCAmelCase = in_proj_bias[-config.hidden_size :]
def _a ( SCREAMING_SNAKE_CASE_ : str ):
__lowerCAmelCase = ["head.weight", "head.bias"]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _a ( SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any] ):
__lowerCAmelCase = dct.pop(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = val
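# What the key-renaming step above does, shown on a toy state dict (my
# example): the old entry is popped and re-inserted under the new key,
# preserving its value.
_demo_sd = {"norm.weight": 0.5}
_demo_sd["vit.layernorm.weight"] = _demo_sd.pop("norm.weight")
assert _demo_sd == {"vit.layernorm.weight": 0.5}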
def _a ( ):
__lowerCAmelCase = "http://images.cocodataset.org/val2017/000000039769.jpg"
__lowerCAmelCase = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : int=True ):
__lowerCAmelCase = ViTConfig()
# patch_size
if model_name[-1] == "8":
__lowerCAmelCase = 8
# set labels if required
if not base_model:
__lowerCAmelCase = 10_00
__lowerCAmelCase = "huggingface/label-files"
__lowerCAmelCase = "imagenet-1k-id2label.json"
__lowerCAmelCase = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
__lowerCAmelCase = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
__lowerCAmelCase = idalabel
__lowerCAmelCase = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
__lowerCAmelCase = 3_84
__lowerCAmelCase = 15_36
__lowerCAmelCase = 12
__lowerCAmelCase = 6
# load original model from torch hub
__lowerCAmelCase = torch.hub.load("facebookresearch/dino:main" , SCREAMING_SNAKE_CASE_ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
__lowerCAmelCase = original_model.state_dict()
if base_model:
remove_classification_head_(SCREAMING_SNAKE_CASE_ )
__lowerCAmelCase = create_rename_keys(SCREAMING_SNAKE_CASE_ , base_model=SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
if base_model:
__lowerCAmelCase = ViTModel(SCREAMING_SNAKE_CASE_ , add_pooling_layer=SCREAMING_SNAKE_CASE_ ).eval()
else:
__lowerCAmelCase = ViTForImageClassification(SCREAMING_SNAKE_CASE_ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image, prepared by ViTImageProcessor
__lowerCAmelCase = ViTImageProcessor()
__lowerCAmelCase = image_processor(images=prepare_img() , return_tensors="pt" )
__lowerCAmelCase = encoding["pixel_values"]
__lowerCAmelCase = model(SCREAMING_SNAKE_CASE_ )
if base_model:
__lowerCAmelCase = original_model(SCREAMING_SNAKE_CASE_ )
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
__lowerCAmelCase = original_model(SCREAMING_SNAKE_CASE_ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE_ , outputs.logits , atol=1E-3 )
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
UpperCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""dino_vitb16""",
type=str,
help="""Name of the model trained with DINO you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--base_model""",
action="""store_true""",
help="""Whether to only convert the base model (no projection head weights).""",
)
parser.set_defaults(base_model=True)
UpperCamelCase__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 92 |
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCAmelCase_ ="Wav2Vec2FeatureExtractor"
UpperCAmelCase_ ="AutoTokenizer"
def __init__( self , _A , _A ) -> Dict:
super().__init__(_A , _A )
SCREAMING_SNAKE_CASE_ = self.feature_extractor
SCREAMING_SNAKE_CASE_ = False
@classmethod
def _UpperCamelCase ( cls , _A , **_A ) -> List[str]:
try:
return super().from_pretrained(_A , **_A )
except OSError:
warnings.warn(
F'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , _A , )
SCREAMING_SNAKE_CASE_ = WavaVecaFeatureExtractor.from_pretrained(_A , **_A )
SCREAMING_SNAKE_CASE_ = WavaVecaCTCTokenizer.from_pretrained(_A , **_A )
return cls(feature_extractor=_A , tokenizer=_A )
def __call__( self , *_A , **_A ) -> Any:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_A , **_A )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''raw_speech''' )
else:
SCREAMING_SNAKE_CASE_ = kwargs.pop('''audio''' , _A )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''sampling_rate''' , _A )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''text''' , _A )
if len(_A ) > 0:
SCREAMING_SNAKE_CASE_ = args[0]
SCREAMING_SNAKE_CASE_ = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
SCREAMING_SNAKE_CASE_ = self.feature_extractor(_A , *_A , sampling_rate=_A , **_A )
if text is not None:
SCREAMING_SNAKE_CASE_ = self.tokenizer(_A , **_A )
if text is None:
return inputs
elif audio is None:
return encodings
else:
SCREAMING_SNAKE_CASE_ = encodings['''input_ids''']
return inputs
def _UpperCamelCase ( self , *_A , **_A ) -> Union[str, Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*_A , **_A )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''input_features''' , _A )
SCREAMING_SNAKE_CASE_ = kwargs.pop('''labels''' , _A )
if len(_A ) > 0:
SCREAMING_SNAKE_CASE_ = args[0]
SCREAMING_SNAKE_CASE_ = args[1:]
if input_features is not None:
SCREAMING_SNAKE_CASE_ = self.feature_extractor.pad(_A , *_A , **_A )
if labels is not None:
SCREAMING_SNAKE_CASE_ = self.tokenizer.pad(_A , **_A )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
SCREAMING_SNAKE_CASE_ = labels['''input_ids''']
return input_features
def _UpperCamelCase ( self , *_A , **_A ) -> Any:
return self.tokenizer.batch_decode(*_A , **_A )
def _UpperCamelCase ( self , *_A , **_A ) -> Optional[Any]:
return self.tokenizer.decode(*_A , **_A )
@contextmanager
def _UpperCamelCase ( self ) -> Optional[int]:
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your audio inputs, or in a separate call).''' )
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.tokenizer
yield
SCREAMING_SNAKE_CASE_ = self.feature_extractor
SCREAMING_SNAKE_CASE_ = False
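# Minimal sketch (my own) of the `as_target_processor` pattern implemented
# above: temporarily route calls to the tokenizer, then restore the feature
# extractor. `_TinyProcessor` is an illustrative stand-in, not a library class.
class _TinyProcessor:
    def __init__(self, feature_extractor, tokenizer):
        self.feature_extractor = feature_extractor
        self.tokenizer = tokenizer
        self.current_processor = feature_extractor
    @contextmanager
    def as_target_processor(self):
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
_p = _TinyProcessor(feature_extractor="fe", tokenizer="tok")
with _p.as_target_processor():
    assert _p.current_processor == "tok"
assert _p.current_processor == "fe"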
| 299 | 0 |
'''simple docstring'''
def UpperCamelCase_( snake_case : int , snake_case : int , snake_case : int ):
'''simple docstring'''
if exponent == 1:
return base
if exponent % 2 == 0:
snake_case_ = _modexpt(__a , exponent // 2 , __a ) % modulo_value
return (x * x) % modulo_value
else:
return (base * _modexpt(__a , exponent - 1 , __a )) % modulo_value
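# Sanity check (my addition): the square-and-multiply recursion above should
# agree with Python's built-in three-argument pow. Re-stated here with
# readable names so the check is self-contained.
def _modexpt_sketch(base, exponent, modulo_value):
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt_sketch(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    return (base * _modexpt_sketch(base, exponent - 1, modulo_value)) % modulo_value
assert _modexpt_sketch(3, 7, 10) == pow(3, 7, 10) == 7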
def UpperCamelCase_( snake_case : int = 1_7_7_7 , snake_case : int = 1_8_5_5 , snake_case : int = 8 ):
'''simple docstring'''
snake_case_ = base
for _ in range(1 , __a ):
snake_case_ = _modexpt(__a , __a , 1_0**digits )
return result
if __name__ == "__main__":
print(F"{solution() = }")
| 365 |
'''simple docstring'''
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_SCREAMING_SNAKE_CASE : Tuple = get_logger(__name__)
def UpperCamelCase_( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : int , snake_case : List[Any]=0 ):
'''simple docstring'''
os.makedirs(snake_case , exist_ok=snake_case )
with FSDP.state_dict_type(
snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
snake_case_ = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case_ = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
snake_case_ = os.path.join(snake_case , snake_case )
if accelerator.process_index == 0:
logger.info(f'Saving model to {output_model_file}' )
torch.save(snake_case , snake_case )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case_ = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
snake_case_ = os.path.join(snake_case , snake_case )
logger.info(f'Saving model to {output_model_file}' )
torch.save(snake_case , snake_case )
logger.info(f'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case_ = os.path.join(snake_case , f'{MODEL_NAME}_{model_index}' )
os.makedirs(snake_case , exist_ok=snake_case )
logger.info(f'Saving model to {ckpt_dir}' )
snake_case_ = {"model": state_dict}
dist_cp.save_state_dict(
state_dict=snake_case , storage_writer=dist_cp.FileSystemWriter(snake_case ) , planner=DefaultSavePlanner() , )
logger.info(f'Model saved to {ckpt_dir}' )
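# Illustrative helper (my addition) reproducing the checkpoint filename
# scheme used above as a pure function; "pytorch_model" stands in for the
# MODEL_NAME constant and is an assumption about its value.
def _model_file_name(model_name, model_index, rank=None):
    stem = model_name if model_index == 0 else f'{model_name}_{model_index}'
    return f'{stem}_rank{rank}.bin' if rank is not None else f'{stem}.bin'
assert _model_file_name("pytorch_model", 0) == "pytorch_model.bin"
assert _model_file_name("pytorch_model", 1, rank=2) == "pytorch_model_1_rank2.bin"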
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : int , snake_case : Union[str, Any] , snake_case : Any=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(snake_case ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
"Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
"initializing FSDP object" )
return
snake_case_ = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin'
snake_case_ = os.path.join(snake_case , snake_case )
logger.info(f'Loading model from {input_model_file}' )
snake_case_ = torch.load(snake_case )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
snake_case_ = (
f'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
snake_case_ = os.path.join(snake_case , snake_case )
logger.info(f'Loading model from {input_model_file}' )
snake_case_ = torch.load(snake_case )
logger.info(f'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
snake_case_ = (
os.path.join(snake_case , f'{MODEL_NAME}_{model_index}' )
if f'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading model from {ckpt_dir}' )
snake_case_ = {"model": model.state_dict()}
dist_cp.load_state_dict(
state_dict=snake_case , storage_reader=dist_cp.FileSystemReader(snake_case ) , planner=DefaultLoadPlanner() , )
snake_case_ = state_dict["model"]
logger.info(f'Model loaded from {ckpt_dir}' )
model.load_state_dict(snake_case )
def UpperCamelCase_( snake_case : str , snake_case : List[str] , snake_case : Any , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Tuple=0 ):
'''simple docstring'''
os.makedirs(snake_case , exist_ok=snake_case )
with FSDP.state_dict_type(
snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
snake_case_ = FSDP.optim_state_dict(snake_case , snake_case )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
snake_case_ = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
snake_case_ = os.path.join(snake_case , snake_case )
logger.info(f'Saving Optimizer state to {output_optimizer_file}' )
torch.save(snake_case , snake_case )
logger.info(f'Optimizer state saved in {output_optimizer_file}' )
else:
snake_case_ = os.path.join(snake_case , f'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(snake_case , exist_ok=snake_case )
logger.info(f'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case ) , planner=DefaultSavePlanner() , )
logger.info(f'Optimizer state saved in {ckpt_dir}' )
def UpperCamelCase_( snake_case : Optional[Any] , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : int , snake_case : Optional[int] , snake_case : Union[str, Any]=0 ):
'''simple docstring'''
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
snake_case_ = None
            # below check should work but currently it isn't working (mostly a PyTorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
snake_case_ = (
f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
snake_case_ = os.path.join(snake_case , snake_case )
logger.info(f'Loading Optimizer state from {input_optimizer_file}' )
snake_case_ = torch.load(snake_case )
logger.info(f'Optimizer state loaded from {input_optimizer_file}' )
else:
snake_case_ = (
os.path.join(snake_case , f'{OPTIMIZER_NAME}_{optimizer_index}' )
if f'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(f'Loading Optimizer from {ckpt_dir}' )
snake_case_ = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(snake_case ) , )
snake_case_ = optim_state["optimizer"]
logger.info(f'Optimizer loaded from {ckpt_dir}' )
snake_case_ = FSDP.optim_state_dict_to_load(snake_case , snake_case , snake_case )
optimizer.load_state_dict(snake_case )
| 92 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[str]:
lowerCAmelCase_ :Any = {
"""task_specific_params""": {
"""summarization""": {"""length_penalty""": 1.0, """max_length""": 128, """min_length""": 12, """num_beams""": 4},
"""summarization_cnn""": {"""length_penalty""": 2.0, """max_length""": 142, """min_length""": 56, """num_beams""": 4},
"""summarization_xsum""": {"""length_penalty""": 1.0, """max_length""": 62, """min_length""": 11, """num_beams""": 6},
}
}
lowerCAmelCase_ :Dict = {
"""task_specific_params.summarization.length_penalty""": 1.0,
"""task_specific_params.summarization.max_length""": 128,
"""task_specific_params.summarization.min_length""": 12,
"""task_specific_params.summarization.num_beams""": 4,
"""task_specific_params.summarization_cnn.length_penalty""": 2.0,
"""task_specific_params.summarization_cnn.max_length""": 142,
"""task_specific_params.summarization_cnn.min_length""": 56,
"""task_specific_params.summarization_cnn.num_beams""": 4,
"""task_specific_params.summarization_xsum.length_penalty""": 1.0,
"""task_specific_params.summarization_xsum.max_length""": 62,
"""task_specific_params.summarization_xsum.min_length""": 11,
"""task_specific_params.summarization_xsum.num_beams""": 6,
}
self.assertEqual(flatten_dict(__A ) , __A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :int = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(__A ) , x.transpose() ) )
lowerCAmelCase_ :Tuple = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(__A , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :str = np.random.randn(3 , 4 )
lowerCAmelCase_ :Optional[Any] = torch.tensor(__A )
self.assertTrue(np.allclose(transpose(__A ) , transpose(__A ).numpy() ) )
lowerCAmelCase_ :Dict = np.random.randn(3 , 4 , 5 )
lowerCAmelCase_ :Union[str, Any] = torch.tensor(__A )
self.assertTrue(np.allclose(transpose(__A , axes=(1, 2, 0) ) , transpose(__A , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Tuple = np.random.randn(3 , 4 )
lowerCAmelCase_ :Any = tf.constant(__A )
self.assertTrue(np.allclose(transpose(__A ) , transpose(__A ).numpy() ) )
lowerCAmelCase_ :Optional[Any] = np.random.randn(3 , 4 , 5 )
lowerCAmelCase_ :Optional[int] = tf.constant(__A )
self.assertTrue(np.allclose(transpose(__A , axes=(1, 2, 0) ) , transpose(__A , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Any = np.random.randn(3 , 4 )
lowerCAmelCase_ :Optional[int] = jnp.array(__A )
self.assertTrue(np.allclose(transpose(__A ) , np.asarray(transpose(__A ) ) ) )
lowerCAmelCase_ :int = np.random.randn(3 , 4 , 5 )
lowerCAmelCase_ :int = jnp.array(__A )
self.assertTrue(np.allclose(transpose(__A , axes=(1, 2, 0) ) , np.asarray(transpose(__A , axes=(1, 2, 0) ) ) ) )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
lowerCAmelCase_ :Union[str, Any] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(__A , (4, 3) ) , np.reshape(__A , (4, 3) ) ) )
lowerCAmelCase_ :List[Any] = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(__A , (12, 5) ) , np.reshape(__A , (12, 5) ) ) )
@require_torch
def __lowerCAmelCase ( self ) -> int:
lowerCAmelCase_ :Dict = np.random.randn(3 , 4 )
lowerCAmelCase_ :Optional[Any] = torch.tensor(__A )
self.assertTrue(np.allclose(reshape(__A , (4, 3) ) , reshape(__A , (4, 3) ).numpy() ) )
lowerCAmelCase_ :List[Any] = np.random.randn(3 , 4 , 5 )
lowerCAmelCase_ :List[Any] = torch.tensor(__A )
self.assertTrue(np.allclose(reshape(__A , (12, 5) ) , reshape(__A , (12, 5) ).numpy() ) )
@require_tf
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :List[Any] = np.random.randn(3 , 4 )
lowerCAmelCase_ :List[str] = tf.constant(__A )
self.assertTrue(np.allclose(reshape(__A , (4, 3) ) , reshape(__A , (4, 3) ).numpy() ) )
lowerCAmelCase_ :Optional[Any] = np.random.randn(3 , 4 , 5 )
lowerCAmelCase_ :Any = tf.constant(__A )
self.assertTrue(np.allclose(reshape(__A , (12, 5) ) , reshape(__A , (12, 5) ).numpy() ) )
@require_flax
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Optional[int] = np.random.randn(3 , 4 )
lowerCAmelCase_ :Union[str, Any] = jnp.array(__A )
self.assertTrue(np.allclose(reshape(__A , (4, 3) ) , np.asarray(reshape(__A , (4, 3) ) ) ) )
lowerCAmelCase_ :List[Any] = np.random.randn(3 , 4 , 5 )
lowerCAmelCase_ :List[Any] = jnp.array(__A )
self.assertTrue(np.allclose(reshape(__A , (12, 5) ) , np.asarray(reshape(__A , (12, 5) ) ) ) )
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :Union[str, Any] = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(__A ) , np.squeeze(__A ) ) )
lowerCAmelCase_ :Optional[int] = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(__A , axis=2 ) , np.squeeze(__A , axis=2 ) ) )
@require_torch
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[int] = np.random.randn(1 , 3 , 4 )
lowerCAmelCase_ :List[Any] = torch.tensor(__A )
self.assertTrue(np.allclose(squeeze(__A ) , squeeze(__A ).numpy() ) )
lowerCAmelCase_ :Union[str, Any] = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase_ :Optional[Any] = torch.tensor(__A )
self.assertTrue(np.allclose(squeeze(__A , axis=2 ) , squeeze(__A , axis=2 ).numpy() ) )
@require_tf
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :str = np.random.randn(1 , 3 , 4 )
lowerCAmelCase_ :Optional[int] = tf.constant(__A )
self.assertTrue(np.allclose(squeeze(__A ) , squeeze(__A ).numpy() ) )
lowerCAmelCase_ :Dict = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase_ :Union[str, Any] = tf.constant(__A )
self.assertTrue(np.allclose(squeeze(__A , axis=2 ) , squeeze(__A , axis=2 ).numpy() ) )
@require_flax
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Tuple = np.random.randn(1 , 3 , 4 )
lowerCAmelCase_ :str = jnp.array(__A )
self.assertTrue(np.allclose(squeeze(__A ) , np.asarray(squeeze(__A ) ) ) )
lowerCAmelCase_ :Any = np.random.randn(1 , 4 , 1 , 5 )
lowerCAmelCase_ :Optional[int] = jnp.array(__A )
self.assertTrue(np.allclose(squeeze(__A , axis=2 ) , np.asarray(squeeze(__A , axis=2 ) ) ) )
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :Optional[int] = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(__A , axis=1 ) , np.expand_dims(__A , axis=1 ) ) )
@require_torch
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :int = np.random.randn(3 , 4 )
lowerCAmelCase_ :str = torch.tensor(__A )
self.assertTrue(np.allclose(expand_dims(__A , axis=1 ) , expand_dims(__A , axis=1 ).numpy() ) )
@require_tf
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :str = np.random.randn(3 , 4 )
lowerCAmelCase_ :Any = tf.constant(__A )
self.assertTrue(np.allclose(expand_dims(__A , axis=1 ) , expand_dims(__A , axis=1 ).numpy() ) )
@require_flax
def __lowerCAmelCase ( self ) -> str:
lowerCAmelCase_ :List[str] = np.random.randn(3 , 4 )
lowerCAmelCase_ :int = jnp.array(__A )
self.assertTrue(np.allclose(expand_dims(__A , axis=1 ) , np.asarray(expand_dims(__A , axis=1 ) ) ) )
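# Framework-free reference (my addition) for the four array ops the tests
# above exercise across NumPy, PyTorch, TensorFlow and JAX: every backend is
# expected to match plain NumPy on shapes and values.
_x = np.random.randn(1, 3, 4)
assert np.squeeze(_x).shape == (3, 4)
assert np.transpose(_x, (2, 0, 1)).shape == (4, 1, 3)
assert np.reshape(_x, (12, 1)).shape == (12, 1)
assert np.expand_dims(_x, axis=0).shape == (1, 1, 3, 4)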
| 84 |
class OverFlowError(Exception):
    pass
class UnderFlowError(Exception):
    pass
class __SCREAMING_SNAKE_CASE:
def __init__( self: List[str] ) -> Union[str, Any]:
snake_case__ = [
[],
[],
[],
]
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: int ) -> None:
try:
if len(self.queues[priority] ) >= 1_00:
raise OverflowError('Maximum queue size is 100' )
self.queues[priority].append(UpperCamelCase )
except IndexError:
raise ValueError('Valid priorities are 0, 1, and 2' )
def lowerCAmelCase_ ( self: List[Any] ) -> int:
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('All queues are empty' )
def __str__( self: Union[str, Any] ) -> str:
return "\n".join(F'''Priority {i}: {q}''' for i, q in enumerate(self.queues ) )
class __SCREAMING_SNAKE_CASE:
def __init__( self: Union[str, Any] ) -> Any:
snake_case__ = []
def lowerCAmelCase_ ( self: str , UpperCamelCase: int ) -> None:
if len(self.queue ) == 1_00:
raise OverFlowError('Maximum queue size is 100' )
self.queue.append(UpperCamelCase )
def lowerCAmelCase_ ( self: int ) -> int:
if not self.queue:
raise UnderFlowError('The queue is empty' )
else:
snake_case__ = min(self.queue )
self.queue.remove(UpperCamelCase )
return data
def __str__( self: Optional[Any] ) -> str:
return str(self.queue )
def a_ ( ) -> List[Any]:
"""simple docstring"""
snake_case__ = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(_A )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(_A )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def a_ ( ) -> List[Any]:
"""simple docstring"""
snake_case__ = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(_A )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(_A )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
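# For contrast (my addition): the element-priority behaviour demonstrated
# above is what the standard library's heapq gives in O(log n) per operation,
# versus the O(n) min-scan used in the queue class here.
import heapq
_heap = []
for _v in (10, 70, 1, 5):
    heapq.heappush(_heap, _v)
assert heapq.heappop(_heap) == 1
assert heapq.heappop(_heap) == 5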
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 307 | 0 |
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , ):
__a = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('''All input parameters must be positive''' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('''Relative densities cannot be greater than one''' )
else:
__a = 1 - (matter_density + radiation_density + dark_energy)
__a = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__a = hubble_constant * e_a ** (1 / 2)
return hubble
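# Quick check (my addition): at redshift 0 with relative densities summing to
# one (a flat universe), the Friedmann factor is 1 and H(z) equals the Hubble
# constant. Re-stated with readable names so the check runs standalone.
def _hubble_sketch(hubble_constant, radiation_density, matter_density, dark_energy, redshift):
    curvature = 1 - (matter_density + radiation_density + dark_energy)
    e_2 = (
        radiation_density * (redshift + 1) ** 4
        + matter_density * (redshift + 1) ** 3
        + curvature * (redshift + 1) ** 2
        + dark_energy
    )
    return hubble_constant * e_2 ** 0.5
assert abs(_hubble_sketch(68.3, 1e-4, 0.3, 1 - 0.3 - 1e-4, 0) - 68.3) < 1e-9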
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
__snake_case :Union[str, Any] = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 367 |
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
def get_matched_characters(_UpperCAmelCase , _UpperCAmelCase ) -> str:
__a = []
__a = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
__a = int(max(0 , i - limit ) )
__a = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(_UpperCAmelCase )
__a = f'{_stra[0:_stra.index(_UpperCAmelCase )]} {_stra[_stra.index(_UpperCAmelCase ) + 1:]}'
return "".join(_UpperCAmelCase )
# matching characters
__a = get_matched_characters(_UpperCAmelCase , _UpperCAmelCase )
__a = get_matched_characters(_UpperCAmelCase , _UpperCAmelCase )
__a = len(_UpperCAmelCase )
# transposition
__a = (
len([(ca, ca) for ca, ca in zip(_UpperCAmelCase , _UpperCAmelCase ) if ca != ca] ) // 2
)
if not match_count:
__a = 0.0
else:
__a = (
1
/ 3
* (
match_count / len(_UpperCAmelCase )
+ match_count / len(_UpperCAmelCase )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
__a = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
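# Worked value (my computation) for the demo call below: with "hello" vs
# "world" the match window is min(5, 5) // 2 = 2, only "l" matches inside it,
# so m = 1, t = 0 and jaro = (1/5 + 1/5 + 1/1) / 3 = 0.4666...; there is no
# common prefix, so Jaro-Winkler equals plain Jaro here.
assert abs((1 / 5 + 1 / 5 + 1 / 1) / 3 - 0.4666666666666667) < 1e-12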
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world'''))
| 131 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
"""facebook/nllb-moe-54B""": """https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json""",
}
class a__ ( _a ):
_a : Optional[int] = '''nllb-moe'''
_a : Dict = ['''past_key_values''']
_a : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , _A=1_2_8_1_1_2 , _A=1_0_2_4 , _A=1_2 , _A=4_0_9_6 , _A=1_6 , _A=1_2 , _A=4_0_9_6 , _A=1_6 , _A=0.05 , _A=0.05 , _A=True , _A=True , _A="relu" , _A=1_0_2_4 , _A=0.1 , _A=0.1 , _A=0.0 , _A=0.02 , _A=2 , _A=True , _A=False , _A="float32" , _A=False , _A=1_2_8 , _A=6_4 , _A=4 , _A=4 , _A=0.0_01 , _A=0.0_01 , _A="all" , _A=False , _A=False , _A=1.0 , _A=0.2 , _A=1 , _A=0 , _A=2 , _A=False , **_A , ):
"""simple docstring"""
__lowerCAmelCase = vocab_size
__lowerCAmelCase = max_position_embeddings
__lowerCAmelCase = d_model
__lowerCAmelCase = encoder_ffn_dim
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = encoder_attention_heads
__lowerCAmelCase = decoder_ffn_dim
__lowerCAmelCase = decoder_layers
__lowerCAmelCase = decoder_attention_heads
__lowerCAmelCase = dropout
__lowerCAmelCase = attention_dropout
__lowerCAmelCase = activation_dropout
__lowerCAmelCase = activation_function
__lowerCAmelCase = init_std
__lowerCAmelCase = encoder_layerdrop
__lowerCAmelCase = decoder_layerdrop
__lowerCAmelCase = use_cache
__lowerCAmelCase = encoder_layers
__lowerCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCAmelCase = router_z_loss_coef
__lowerCAmelCase = router_aux_loss_coef
__lowerCAmelCase = decoder_sparse_step
__lowerCAmelCase = encoder_sparse_step
__lowerCAmelCase = num_experts
__lowerCAmelCase = expert_capacity
__lowerCAmelCase = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}""" )
__lowerCAmelCase = router_dtype
__lowerCAmelCase = router_ignore_padding_tokens
__lowerCAmelCase = batch_prioritized_routing
__lowerCAmelCase = second_expert_policy
__lowerCAmelCase = normalize_router_prob_before_dropping
__lowerCAmelCase = moe_eval_capacity_token_fraction
__lowerCAmelCase = moe_token_dropout
__lowerCAmelCase = output_router_logits
super().__init__(
pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , is_encoder_decoder=SCREAMING_SNAKE_CASE__ , decoder_start_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
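# Hedged usage sketch (my addition): the router_dtype guard above rejects
# anything outside the three float formats. `_TinyRouterConfig` is an
# illustrative stand-in, not the library class.
class _TinyRouterConfig:
    def __init__(self, router_dtype="float32"):
        if router_dtype not in ("float32", "float16", "bfloat16"):
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
try:
    _TinyRouterConfig(router_dtype="int8")
    raise AssertionError("expected a ValueError")
except ValueError:
    pass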
| 92 |
'''simple docstring'''
import math
def A_ ( snake_case , snake_case ):
if initial_intensity < 0:
raise ValueError("The value of intensity cannot be negative" )
# handling of negative values of initial intensity
if angle < 0 or angle > 360:
raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
# handling of values out of allowed range
return initial_intensity * (math.cos(math.radians(snake_case ) ) ** 2)
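# Quick checks (my addition): at 0 degrees the polarizer passes everything;
# at 60 degrees cos^2 leaves a quarter of the initial intensity. Re-stated
# with a readable name so the checks run standalone.
def _malus_sketch(initial_intensity, angle):
    return initial_intensity * math.cos(math.radians(angle)) ** 2
assert _malus_sketch(100.0, 0) == 100.0
assert abs(_malus_sketch(100.0, 60) - 25.0) < 1e-9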
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
| 139 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowerCamelCase_ ( _UpperCamelCase ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> XGBClassifier:
"""simple docstring"""
snake_case_ : Optional[Any] = XGBClassifier()
classifier.fit(_UpperCamelCase , _UpperCamelCase )
return classifier
def lowerCamelCase_ ( ) -> None:
"""simple docstring"""
snake_case_ : Optional[Any] = load_iris()
snake_case_ , snake_case_ : str = data_handling(_UpperCamelCase )
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Dict = train_test_split(
_UpperCamelCase , _UpperCamelCase , test_size=0.25 )
snake_case_ : List[str] = iris['''target_names''']
# Create an XGBoost Classifier from the training data
snake_case_ : int = xgboost(_UpperCamelCase , _UpperCamelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , display_labels=_UpperCamelCase , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 279 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __lowerCAmelCase ( nn.Module ):
lowerCamelCase_ : int
lowerCamelCase_ : int
lowerCamelCase_ : float = 0.0
lowerCamelCase_ : int = 1
lowerCamelCase_ : int = 1
lowerCamelCase_ : bool = True
lowerCamelCase_ : bool = False
lowerCamelCase_ : bool = False
lowerCamelCase_ : bool = False
lowerCamelCase_ : jnp.dtype = jnp.floataa
def lowerCamelCase (self ) -> Tuple:
'''simple docstring'''
snake_case_ : Tuple = []
snake_case_ : List[str] = []
for i in range(self.num_layers ):
snake_case_ : Tuple = self.in_channels if i == 0 else self.out_channels
snake_case_ : Dict = FlaxResnetBlockaD(
in_channels=__magic_name__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__magic_name__ )
snake_case_ : str = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__magic_name__ )
snake_case_ : Union[str, Any] = resnets
snake_case_ : Union[str, Any] = attentions
if self.add_downsample:
snake_case_ : List[str] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True ) -> List[Any]:
'''simple docstring'''
snake_case_ : str = ()
for resnet, attn in zip(self.resnets , self.attentions ):
snake_case_ : Optional[Any] = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
snake_case_ : List[str] = attn(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
output_states += (hidden_states,)
if self.add_downsample:
snake_case_ : Union[str, Any] = self.downsamplers_a(__magic_name__ )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowerCAmelCase ( nn.Module ):
lowerCamelCase_ : int
lowerCamelCase_ : int
lowerCamelCase_ : float = 0.0
lowerCamelCase_ : int = 1
lowerCamelCase_ : bool = True
lowerCamelCase_ : jnp.dtype = jnp.floataa
def lowerCamelCase (self ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[Any] = []
for i in range(self.num_layers ):
snake_case_ : List[Any] = self.in_channels if i == 0 else self.out_channels
snake_case_ : Tuple = FlaxResnetBlockaD(
in_channels=__magic_name__ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__magic_name__ )
snake_case_ : Dict = resnets
if self.add_downsample:
snake_case_ : Optional[int] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__(self , __magic_name__ , __magic_name__ , __magic_name__=True ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Any = ()
for resnet in self.resnets:
snake_case_ : List[Any] = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
output_states += (hidden_states,)
if self.add_downsample:
snake_case_ : str = self.downsamplers_a(__magic_name__ )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowerCAmelCase ( nn.Module ):
lowerCamelCase_ : int
lowerCamelCase_ : int
lowerCamelCase_ : int
lowerCamelCase_ : float = 0.0
lowerCamelCase_ : int = 1
lowerCamelCase_ : int = 1
lowerCamelCase_ : bool = True
lowerCamelCase_ : bool = False
lowerCamelCase_ : bool = False
lowerCamelCase_ : bool = False
lowerCamelCase_ : jnp.dtype = jnp.floataa
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = []
snake_case_ : Optional[Any] = []
for i in range(self.num_layers ):
snake_case_ : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case_ : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
snake_case_ : Dict = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__magic_name__ )
snake_case_ : List[str] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__magic_name__ )
snake_case_ : List[Any] = resnets
snake_case_ : Tuple = attentions
if self.add_upsample:
snake_case_ : List[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True ) -> Union[str, Any]:
'''simple docstring'''
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
snake_case_ : Dict = res_hidden_states_tuple[-1]
snake_case_ : List[Any] = res_hidden_states_tuple[:-1]
snake_case_ : Optional[int] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case_ : Tuple = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
snake_case_ : Tuple = attn(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
if self.add_upsample:
snake_case_ : Optional[Any] = self.upsamplers_a(__magic_name__ )
return hidden_states
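# The skip-connection bookkeeping above, isolated (my sketch): pop the most
# recent residual and concatenate it on the channel axis before each ResNet
# block. Shapes here are arbitrary illustrative values.
_hidden = jnp.zeros((1, 8, 8, 4))
_skips = (jnp.zeros((1, 8, 8, 2)), jnp.zeros((1, 8, 8, 3)))
_res = _skips[-1]
_skips = _skips[:-1]
_hidden = jnp.concatenate((_hidden, _res), axis=-1)
assert _hidden.shape == (1, 8, 8, 7)
assert len(_skips) == 1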
class __lowerCAmelCase ( nn.Module ):
lowerCamelCase_ : int
lowerCamelCase_ : int
lowerCamelCase_ : int
lowerCamelCase_ : float = 0.0
lowerCamelCase_ : int = 1
lowerCamelCase_ : bool = True
lowerCamelCase_ : jnp.dtype = jnp.floataa
def lowerCamelCase (self ) -> int:
'''simple docstring'''
snake_case_ : Union[str, Any] = []
for i in range(self.num_layers ):
snake_case_ : Tuple = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case_ : Optional[Any] = self.prev_output_channel if i == 0 else self.out_channels
snake_case_ : int = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__magic_name__ )
snake_case_ : Tuple = resnets
if self.add_upsample:
snake_case_ : List[str] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True ) -> List[Any]:
'''simple docstring'''
for resnet in self.resnets:
# pop res hidden states
snake_case_ : Tuple = res_hidden_states_tuple[-1]
snake_case_ : List[Any] = res_hidden_states_tuple[:-1]
snake_case_ : Dict = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case_ : Optional[Any] = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
if self.add_upsample:
snake_case_ : Optional[int] = self.upsamplers_a(__magic_name__ )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
lowerCamelCase_ : int
lowerCamelCase_ : float = 0.0
lowerCamelCase_ : int = 1
lowerCamelCase_ : int = 1
lowerCamelCase_ : bool = False
lowerCamelCase_ : bool = False
lowerCamelCase_ : jnp.dtype = jnp.floataa
def lowerCamelCase (self ) -> str:
'''simple docstring'''
snake_case_ : Dict = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
snake_case_ : int = []
for _ in range(self.num_layers ):
snake_case_ : str = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__magic_name__ )
snake_case_ : Dict = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__magic_name__ )
snake_case_ : Optional[Any] = resnets
snake_case_ : Optional[int] = attentions
def __call__(self , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=True ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[Any] = self.resnets[0](__magic_name__ , __magic_name__ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
snake_case_ : Tuple = attn(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
snake_case_ : Union[str, Any] = resnet(__magic_name__ , __magic_name__ , deterministic=__magic_name__ )
return hidden_states
| 279 | 1 |
"""simple docstring"""
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Returns the mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the constructed distributions."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. the length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value guaranteed to be valid when computing the log-loss of the distribution. By default 0.0."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the projection layer mapping inputs to the distribution parameters."""
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        """Convert arguments to the right shape and domain for the concrete distribution."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Map inputs to the positive orthant: (x + sqrt(x^2 + 4)) / 2."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Overwrites the parent method: we cannot scale with an affine transform since
    # the negative binomial is integer-valued, so we scale the parameters instead.
    def distribution(
        self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None
    ) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
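# Minimal usage sketch for the distribution-output helpers above; the feature
# dimension and batch shapes here are illustrative assumptions.
import torch

dist_output = StudentTOutput(dim=1)
proj = dist_output.get_parameter_projection(in_features=32)

features = torch.randn(4, 10, 32)        # (batch, time, features)
distr_args = proj(features)              # df, loc, scale -- each of shape (4, 10)
distribution = dist_output.distribution(distr_args, loc=torch.zeros(4, 1), scale=torch.ones(4, 1))

target = torch.randn(4, 10)
nll = -distribution.log_prob(target)     # per-step negative log-likelihood
print(nll.shape)                         # torch.Size([4, 10])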
| 113 |
"""simple docstring"""
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    """
    Args:
        model: BertModel Pytorch model instance to be converted
        ckpt_dir: Tensorflow model directory
        model_name: model name
    """
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")

    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )

    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)

    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")

        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)

    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )

    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
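# Hypothetical programmatic invocation of `main` above; all paths are
# placeholders, shown only to illustrate the expected argument shape:
#
#   main(
#       [
#           "--model_name", "bert-base-uncased",
#           "--pytorch_model_path", "/path/to/pytorch_model.bin",
#           "--tf_cache_dir", "/path/to/tf_out",
#       ]
#   )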
| 113 | 1 |
INSTALL_CONTENT = """
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the latest release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
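# Minimal sketch of how the placeholder map above is typically consumed when
# turning doc sources into notebooks (the substitution loop itself is an assumption):
snippet = "model = {model_class}.from_pretrained(checkpoint)"
for placeholder, fake in black_avoid_patterns.items():
    snippet = snippet.replace(placeholder, fake)
print(snippet)  # model = FakeModelClass.from_pretrained(checkpoint)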
| 276 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """
    Möbius function mu(n): 1 for a square-free n with an even number of prime
    factors, -1 for an odd number, and 0 if n has a squared prime factor.
    """
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
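
# Quick checks for `mobius` above (standard values of the Möbius function):
print(mobius(24))  # 0  -> 24 = 2^3 * 3 contains a squared prime factor
print(mobius(15))  # 1  -> 15 = 3 * 5, an even number of distinct primes
print(mobius(30))  # -1 -> 30 = 2 * 3 * 5, an odd number of distinct primes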
if __name__ == "__main__":
import doctest
doctest.testmod()
| 276 | 1 |
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
logger = logging.getLogger(__name__)


class RagPyTorchDistributedRetriever(RagRetriever):
    """
    A distributed retriever built on top of the torch.distributed communication package. During training, all workers
    initialize their own instance of the retriever, but only the main worker loads the index into memory.
    """

    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, index=None):
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.process_group = None

    def init_retrieval(self, distributed_port: int):
        logger.info("initializing retrieval")

        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized")
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1)
            self.process_group = dist.new_group(ranks=None, backend="gloo")

        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main")
            self.index.init_index()

        # all processes wait untill the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group)

    def _is_main(self):
        return dist.get_rank(group=self.process_group) == 0

    def _scattered(self, scatter_list, target_shape, target_type=torch.float32):
        target_tensor = torch.empty(target_shape, dtype=target_type)
        dist.scatter(target_tensor, src=0, scatter_list=scatter_list, group=self.process_group)
        return target_tensor

    def _infer_socket_ifname(self):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e")), None)
        return ifname

    def retrieve(self, question_hidden_states: np.ndarray, n_docs: int):
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)

        # distributed training
        world_size = dist.get_world_size(group=self.process_group)

        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape, dtype=torch.float32) for _ in range(world_size)]
        dist.gather(torch.tensor(question_hidden_states), dst=0, gather_list=gather_list, group=self.process_group)

        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list).numpy(), n_docs)
            ids, vectors = torch.tensor(ids), torch.tensor(vectors)
            scatter_ids = self._chunk_tensor(ids, n_queries)
            scatter_vectors = self._chunk_tensor(vectors, n_queries)
        doc_ids = self._scattered(scatter_ids, [n_queries, n_docs], target_type=torch.int64)
        retrieved_doc_embeds = self._scattered(scatter_vectors, [n_queries, n_docs, question_hidden_states.shape[1]])

        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids)
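# Minimal single-process sketch (torch.distributed not initialized). The objects
# `config`, `question_tokenizer`, `generator_tokenizer` and `index` are assumed
# to come from the usual RAG setup, and a 768-d index is assumed for the shapes.
import numpy as np

retriever = RagPyTorchDistributedRetriever(
    config,
    question_encoder_tokenizer=question_tokenizer,
    generator_tokenizer=generator_tokenizer,
    index=index,
)
retriever.init_retrieval(distributed_port=29500)

question_hidden_states = np.random.randn(2, 768).astype(np.float32)
doc_embeds, doc_ids, doc_dicts = retriever.retrieve(question_hidden_states, n_docs=5)
print(doc_embeds.shape)  # (2, 5, 768) under the assumptions above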
| 13 |
from __future__ import annotations

from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward and cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
            shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward, v_fwd, visited_forward, visited_backward, cst_fwd, cst_bwd, queue_forward, parent_forward, shortest_distance
        )

        shortest_distance = pass_and_relaxation(
            graph_backward, v_bwd, visited_backward, visited_forward, cst_bwd, cst_fwd, queue_backward, parent_backward, shortest_distance
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"""B""": [["""C""", 1]],
"""C""": [["""D""", 1]],
"""D""": [["""F""", 1]],
"""E""": [["""B""", 1], ["""G""", 2]],
"""F""": [],
"""G""": [["""F""", 1]],
}
graph_bwd = {
"""B""": [["""E""", 1]],
"""C""": [["""B""", 1]],
"""D""": [["""C""", 1]],
"""F""": [["""D""", 1], ["""G""", 1]],
"""E""": [[None, np.inf]],
"""G""": [["""E""", 2]],
}
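
# Worked example on the demo graphs above: from "E" to "F" the candidate routes
# are E-B-C-D-F (cost 4) and E-G-F (cost 3), so the search returns 3.
print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))  # 3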
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 | 0 |
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = '\\n@inproceedings{inproceedings,\n    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n    year = {2004},\n    month = {01},\n    pages = {},\n    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nComputes CER score of transcribed segments against references.\nArgs:\n    references: list of references for each speech input.\n    predictions: list of transcribtions to score.\n    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n    (float): the character error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> cer = datasets.load_metric("cer")\n    >>> cer_score = cer.compute(predictions=predictions, references=references)\n    >>> print(cer_score)\n    0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
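
# Illustrative use of the metric class above, mirroring the docstring example
# (normally the metric would be obtained via `datasets.load_metric("cer")`):
predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
cer_score = CER().compute(predictions=predictions, references=references)
print(round(cer_score, 4))  # ~0.3415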
| 210 |
import cv2
import numpy as np


class HarrisCorner:
    def __init__(self, k: float, window_size: int):
        """
        k: empirically determined constant, conventionally 0.04 or 0.06
        window_size: size of the neighbourhood considered
        """
        if k in (0.04, 0.06):
            self.k = k
            self.window_size = window_size
        else:
            raise ValueError("invalid k value")

    def __str__(self):
        return str(self.k)

    def detect(self, img_path: str):
        """
        Returns the image with detected corners marked and a list of corner positions.
        """
        img = cv2.imread(img_path, 0)
        h, w = img.shape
        corner_list = []
        color_img = img.copy()
        color_img = cv2.cvtColor(color_img, cv2.COLOR_GRAY2RGB)
        dy, dx = np.gradient(img)
        ixx = dx**2
        iyy = dy**2
        ixy = dx * dy
        k = 0.04
        offset = self.window_size // 2
        for y in range(offset, h - offset):
            for x in range(offset, w - offset):
                wxx = ixx[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wyy = iyy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                wxy = ixy[y - offset : y + offset + 1, x - offset : x + offset + 1].sum()
                det = (wxx * wyy) - (wxy**2)
                trace = wxx + wyy
                r = det - k * (trace**2)
                # Can change the value
                if r > 0.5:
                    corner_list.append([x, y, r])
                    color_img.itemset((y, x, 0), 0)
                    color_img.itemset((y, x, 1), 0)
                    color_img.itemset((y, x, 2), 255)
        return color_img, corner_list


if __name__ == "__main__":
    edge_detect = HarrisCorner(0.04, 3)
    color_img, corner_list = edge_detect.detect("path_to_image")
    cv2.imwrite("detect.png", color_img)
| 210 | 1 |
import json
import logging
import os
import socket
import git
import numpy as np
import torch
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - PID: %(process)d - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
logger = logging.getLogger(__name__)


def git_log(folder_path: str):
    """
    Log commit info.
    """
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
    }

    with open(os.path.join(folder_path, "git_log.json"), "w") as f:
        json.dump(repo_infos, f, indent=4)


def init_gpu_params(params):
    """
    Handle single and multi-GPU / multi-node.
    """
    if params.n_gpu <= 0:
        params.local_rank = 0
        params.master_port = -1
        params.is_master = True
        params.multi_gpu = False
        return

    assert torch.cuda.is_available()

    logger.info("Initializing GPUs")
    if params.n_gpu > 1:
        assert params.local_rank != -1

        params.world_size = int(os.environ["WORLD_SIZE"])
        params.n_gpu_per_node = int(os.environ["N_GPU_NODE"])
        params.global_rank = int(os.environ["RANK"])

        # number of nodes / node ID
        params.n_nodes = params.world_size // params.n_gpu_per_node
        params.node_id = params.global_rank // params.n_gpu_per_node
        params.multi_gpu = True

        assert params.n_nodes == int(os.environ["N_NODES"])
        assert params.node_id == int(os.environ["NODE_RANK"])

    # local job (single GPU)
    else:
        assert params.local_rank == -1

        params.n_nodes = 1
        params.node_id = 0
        params.local_rank = 0
        params.global_rank = 0
        params.world_size = 1
        params.n_gpu_per_node = 1
        params.multi_gpu = False

    # sanity checks
    assert params.n_nodes >= 1
    assert 0 <= params.node_id < params.n_nodes
    assert 0 <= params.local_rank <= params.global_rank < params.world_size
    assert params.world_size == params.n_nodes * params.n_gpu_per_node

    # define whether this is the master process / if we are in multi-node distributed mode
    params.is_master = params.node_id == 0 and params.local_rank == 0
    params.multi_node = params.n_nodes > 1

    # summary
    PREFIX = f"--- Global rank: {params.global_rank} - "
    logger.info(PREFIX + "Number of nodes: %i" % params.n_nodes)
    logger.info(PREFIX + "Node ID        : %i" % params.node_id)
    logger.info(PREFIX + "Local rank     : %i" % params.local_rank)
    logger.info(PREFIX + "World size     : %i" % params.world_size)
    logger.info(PREFIX + "GPUs per node  : %i" % params.n_gpu_per_node)
    logger.info(PREFIX + "Master         : %s" % str(params.is_master))
    logger.info(PREFIX + "Multi-node     : %s" % str(params.multi_node))
    logger.info(PREFIX + "Multi-GPU      : %s" % str(params.multi_gpu))
    logger.info(PREFIX + "Hostname       : %s" % socket.gethostname())

    # set GPU device
    torch.cuda.set_device(params.local_rank)

    # initialize multi-GPU
    if params.multi_gpu:
        logger.info("Initializing PyTorch distributed")
        torch.distributed.init_process_group(
            init_method="env://",
            backend="nccl",
        )


def set_seed(args):
    """
    Set the random seed.
    """
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)
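
# Minimal sketch exercising the helpers above for a single-GPU job. The
# Namespace fields are assumptions about the caller's argparse setup, and
# init_gpu_params asserts that CUDA is available.
from argparse import Namespace

args = Namespace(seed=56, n_gpu=1, local_rank=-1)
set_seed(args)
init_gpu_params(args)
print(args.is_master, args.multi_gpu)  # True False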
| 307 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class _a :
def __init__( self : List[str] , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Tuple=13 , _SCREAMING_SNAKE_CASE : Tuple=32 , _SCREAMING_SNAKE_CASE : Dict=2 , _SCREAMING_SNAKE_CASE : List[Any]=3 , _SCREAMING_SNAKE_CASE : str=16 , _SCREAMING_SNAKE_CASE : Union[str, Any]=[1, 2, 1] , _SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] , _SCREAMING_SNAKE_CASE : str=2 , _SCREAMING_SNAKE_CASE : Optional[int]=2.0 , _SCREAMING_SNAKE_CASE : Tuple=True , _SCREAMING_SNAKE_CASE : Dict=0.0 , _SCREAMING_SNAKE_CASE : str=0.0 , _SCREAMING_SNAKE_CASE : List[str]=0.1 , _SCREAMING_SNAKE_CASE : Tuple="gelu" , _SCREAMING_SNAKE_CASE : str=False , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : List[Any]=0.02 , _SCREAMING_SNAKE_CASE : Any=1E-5 , _SCREAMING_SNAKE_CASE : Tuple=True , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : Any=10 , _SCREAMING_SNAKE_CASE : Union[str, Any]=8 , )-> Dict:
lowerCAmelCase__ : Optional[Any] = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : Tuple = image_size
lowerCAmelCase__ : Optional[Any] = patch_size
lowerCAmelCase__ : Dict = num_channels
lowerCAmelCase__ : Dict = embed_dim
lowerCAmelCase__ : Optional[Any] = depths
lowerCAmelCase__ : Tuple = num_heads
lowerCAmelCase__ : Dict = window_size
lowerCAmelCase__ : List[str] = mlp_ratio
lowerCAmelCase__ : str = qkv_bias
lowerCAmelCase__ : List[Any] = hidden_dropout_prob
lowerCAmelCase__ : int = attention_probs_dropout_prob
lowerCAmelCase__ : Tuple = drop_path_rate
lowerCAmelCase__ : Dict = hidden_act
lowerCAmelCase__ : Tuple = use_absolute_embeddings
lowerCAmelCase__ : int = patch_norm
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : Dict = is_training
lowerCAmelCase__ : Any = scope
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : Tuple = type_sequence_label_size
lowerCAmelCase__ : Any = encoder_stride
def UpperCAmelCase__( self : str )-> Optional[int]:
lowerCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Dict = None
if self.use_labels:
lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__( self : Optional[int] )-> str:
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any )-> int:
lowerCAmelCase__ : Union[str, Any] = SwinvaModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : List[str] = model(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase__ : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Any )-> List[Any]:
lowerCAmelCase__ : Optional[int] = SwinvaForMaskedImageModeling(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ : Any = 1
lowerCAmelCase__ : Dict = SwinvaForMaskedImageModeling(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[Any] )-> Union[str, Any]:
lowerCAmelCase__ : Tuple = self.type_sequence_label_size
lowerCAmelCase__ : Optional[Any] = SwinvaForImageClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
lowerCAmelCase__ : Any = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__( self : Tuple )-> str:
lowerCAmelCase__ : int = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = config_and_inputs
lowerCAmelCase__ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class _a ( _lowercase , _lowercase , unittest.TestCase):
_a : str = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
_a : Tuple = (
{'''feature-extraction''': SwinvaModel, '''image-classification''': SwinvaForImageClassification}
if is_torch_available()
else {}
)
_a : List[str] = False
_a : int = False
_a : Optional[int] = False
_a : Optional[Any] = False
def UpperCAmelCase__( self : str )-> Optional[Any]:
lowerCAmelCase__ : Tuple = SwinvaModelTester(self )
lowerCAmelCase__ : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
def UpperCAmelCase__( self : str )-> int:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__( self : Optional[int] )-> Optional[Any]:
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
@unittest.skip(reason='''Got `CUDA error: misaligned address` with PyTorch 2.0.0.''' )
def UpperCAmelCase__( self : Optional[Any] )-> Dict:
pass
@unittest.skip(reason='''Swinv2 does not use inputs_embeds''' )
def UpperCAmelCase__( self : Tuple )-> Optional[int]:
pass
def UpperCAmelCase__( self : List[Any] )-> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def UpperCAmelCase__( self : Any )-> Dict:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Dict = model_class(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Tuple = [*signature.parameters.keys()]
lowerCAmelCase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Union[str, Any] )-> Dict:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Tuple = True
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Union[str, Any] = False
lowerCAmelCase__ : Optional[Any] = True
lowerCAmelCase__ : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : str = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ : List[str] = outputs.attentions
lowerCAmelCase__ : Union[str, Any] = len(self.model_tester.depths )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ : int = True
lowerCAmelCase__ : Dict = config.window_size**2
lowerCAmelCase__ : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ : str = outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
lowerCAmelCase__ : int = len(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
lowerCAmelCase__ : str = True
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : int = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
if hasattr(self.model_tester , '''num_hidden_states_types''' ):
lowerCAmelCase__ : List[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCAmelCase__ : str = 2
self.assertEqual(out_len + added_hidden_states , len(_SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ : List[Any] = outputs.attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[str] )-> Tuple:
lowerCAmelCase__ : Any = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Any = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
lowerCAmelCase__ : str = outputs.hidden_states
lowerCAmelCase__ : Optional[int] = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swinv2 has a different seq_length
lowerCAmelCase__ : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCAmelCase__ : Dict = outputs.reshaped_hidden_states
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = reshaped_hidden_states[0].shape
lowerCAmelCase__ : Tuple = (
reshaped_hidden_states[0].view(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def UpperCAmelCase__( self : Tuple )-> List[Any]:
lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase__ : Any = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Any = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Any )-> Tuple:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[int] = 3
lowerCAmelCase__ : List[str] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ : Tuple = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[Any] = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Tuple = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
def UpperCAmelCase__( self : Dict )-> Optional[Any]:
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : str )-> Optional[Any]:
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__( self : Optional[Any] )-> int:
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Optional[Any] = SwinvaModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Dict )-> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Dict = _config_zero_init(_SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
lowerCAmelCase__ : List[str] = model_class(config=_SCREAMING_SNAKE_CASE )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class _a ( unittest.TestCase):
@cached_property
def UpperCAmelCase__( self : Tuple )-> Optional[Any]:
return (
AutoImageProcessor.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__( self : List[Any] )-> List[str]:
lowerCAmelCase__ : Any = SwinvaForImageClassification.from_pretrained('''microsoft/swinv2-tiny-patch4-window8-256''' ).to(
_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[Any] = self.default_image_processor
lowerCAmelCase__ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
lowerCAmelCase__ : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).to(_SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
lowerCAmelCase__ : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(_SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
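# Sketch of the inference pattern exercised by the integration test above. The
# checkpoint name and image path are taken from the test; the class names follow
# current transformers releases (Swinv2*), and network access is required.
import torch
from PIL import Image
from transformers import AutoImageProcessor, Swinv2ForImageClassification

processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)              # torch.Size([1, 1000])
print(logits.argmax(-1).item())  # predicted ImageNet class id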
| 131 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)

BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
    "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
    "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
    "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
    "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
    "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}


class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)


class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
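
# Illustrative construction of the objects above:
config = BloomConfig(n_layer=2, n_head=8, hidden_size=64)
onnx_config = BloomOnnxConfig(config, task="default", use_past=False)
print(config.num_hidden_layers)         # 2 -- resolved through attribute_map to n_layer
print(list(onnx_config.inputs.keys()))  # ['input_ids', 'attention_mask']
print(onnx_config.default_onnx_opset)   # 13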
| 241 |
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"}

ZERO2 = "zero2"
ZERO3 = "zero3"

stages = [ZERO2, ZERO3]


def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # run_asr doesn't save any results, so all we check for now is that the
        # process didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]

        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )

        self.do_checks(output_dir)

        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
            --report_to none
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        # use up to 2 gpus for the distributed runs, 1 otherwise
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
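
# The parameterization above expands into this (stage, model) test matrix:
for stage, model in params:
    print(stage, model, "->", models[model])
# zero2 base   -> patrickvonplaten/wav2vec2_tiny_random
# zero2 robust -> patrickvonplaten/wav2vec2_tiny_random_robust
# zero3 base   -> patrickvonplaten/wav2vec2_tiny_random
# zero3 robust -> patrickvonplaten/wav2vec2_tiny_random_robust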
| 241 | 1 |
def remove_digit(num: int) -> int:
    """
    Returns the biggest possible result that can be achieved by removing
    one digit from the given number.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for _ in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(int("".join(transposition)) for transposition in num_transpositions)
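
# Examples for `remove_digit` above:
print(remove_digit(152))   # 52 -> dropping the leading 1 keeps the two largest digits
print(remove_digit(-290))  # 90 -> the sign is ignored via abs()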
if __name__ == "__main__":
__import__('''doctest''').testmod()
| 279 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
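
# Instantiating the configuration above with its defaults; the geometry matches
# the published Megatron-BERT-345M checkpoint (1024 hidden, 24 layers, 16 heads).
config = MegatronBertConfig()
print(config.model_type, config.hidden_size, config.num_hidden_layers, config.num_attention_heads)
# megatron-bert 1024 24 16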
| 279 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 650, """eval_accuracy""": 0.6, """eval_loss""": 0.9},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.g4dn.xlarge""",
"""results""": {"""train_runtime""": 600, """eval_accuracy""": 0.3, """eval_loss""": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 369 |
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # the underlying model isn't deterministic, so fix the seed
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        # the underlying model isn't deterministic, so fix the seed
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
| 249 | 0 |
'''simple docstring'''
import json
import os
import unittest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( UpperCAmelCase__ , unittest.TestCase ):
__UpperCamelCase : int = CLIPTokenizer
__UpperCamelCase : Dict = CLIPTokenizerFast
__UpperCamelCase : List[str] = True
__UpperCamelCase : Optional[Any] = {}
__UpperCamelCase : str = False
def __UpperCAmelCase ( self :Union[str, Any] ) -> str:
'''simple docstring'''
super().setUp()
# fmt: off
_a : Optional[Any] =["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
_a : List[Any] =dict(zip(SCREAMING_SNAKE_CASE , range(len(SCREAMING_SNAKE_CASE ) ) ) )
_a : List[Any] =["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""]
_a : Dict ={"""unk_token""": """<unk>"""}
_a : List[str] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
_a : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(SCREAMING_SNAKE_CASE ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(SCREAMING_SNAKE_CASE ) )
def __UpperCAmelCase ( self :List[Any] , **SCREAMING_SNAKE_CASE :Optional[int] ) -> Dict:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Any , **SCREAMING_SNAKE_CASE :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
kwargs.update(self.special_tokens_map )
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :Optional[Any] , SCREAMING_SNAKE_CASE :str ) -> int:
'''simple docstring'''
_a : int ="""lower newer"""
_a : List[str] ="""lower newer"""
return input_text, output_text
def __UpperCAmelCase ( self :str ) -> List[str]:
'''simple docstring'''
_a : Any =CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_a : Dict ="""lower newer"""
_a : str =["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""]
_a : int =tokenizer.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
_a : List[str] =tokens + [tokenizer.unk_token]
_a : Optional[Any] =[1_0, 2, 1_6, 9, 3, 2, 1_6, 2_0]
self.assertListEqual(tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
@require_ftfy
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[Any]:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_a : List[str] =self.tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
_a : Tuple =self.rust_tokenizer_class.from_pretrained(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
_a : Union[str, Any] ="""A\n'll 11p223RFโho!!to?'d'd''d of a cat to-$''d."""
_a : List[str] =tokenizer_s.tokenize(SCREAMING_SNAKE_CASE )
_a : List[Any] =tokenizer_r.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Test that the tokenization is identical on an example containing a character (Latin Small Letter A
# with Tilde) encoded in 2 different ways
_a : int ="""xa\u0303y""" + """ """ + """x\xe3y"""
_a : Tuple =tokenizer_s.tokenize(SCREAMING_SNAKE_CASE )
_a : str =tokenizer_r.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Test that the tokenization is identical on unicode of space type
_a : Tuple =[
"""\u0009""", # (horizontal tab, '\t')
"""\u000B""", # (vertical tab)
"""\u000C""", # (form feed)
"""\u0020""", # (space, ' ')
"""\u200E""", # (left-to-right mark):w
"""\u200F""", # (right-to-left mark)
]
for unicode_seq in spaces_unicodes:
_a : List[Any] =tokenizer_s.tokenize(SCREAMING_SNAKE_CASE )
_a : Union[str, Any] =tokenizer_r.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# Test that the tokenization is identical on unicode of line break type
_a : int =[
"""\u000A""", # (line feed, '\n')
"""\r\n""", # (carriage return and line feed, '\r\n')
"""\u000D""", # (carriage return, '\r')
"""\r""", # (carriage return, '\r')
"""\u000D""", # (carriage return, '\r')
"""\u2028""", # (line separator)
"""\u2029""", # (paragraph separator)
# "\u0085", # (next line)
]
# The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms
# it into the Horizontal Ellipsis character "โฆ" ("\u2026") while the fast version transforms it into a
# space (and thus into an empty list).
for unicode_seq in line_break_unicodes:
_a : int =tokenizer_s.tokenize(SCREAMING_SNAKE_CASE )
_a : List[str] =tokenizer_r.tokenize(SCREAMING_SNAKE_CASE )
self.assertListEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def __UpperCAmelCase ( self :List[str] ) -> Any:
'''simple docstring'''
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
_a : int ="""hello""" # `hello` is a token in the vocabulary of `pretrained_name`
_a : Any =f"{text_of_1_token} {text_of_1_token}"
_a : str =self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE , )
_a : Union[str, Any] =tokenizer_r(SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (0, len(SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(SCREAMING_SNAKE_CASE ) + 1, len(SCREAMING_SNAKE_CASE ) + 1 + len(SCREAMING_SNAKE_CASE )) , )
_a : Union[str, Any] =f" {text}"
_a : int =self.rust_tokenizer_class.from_pretrained(
SCREAMING_SNAKE_CASE , use_fast=SCREAMING_SNAKE_CASE , )
_a : int =tokenizer_r(SCREAMING_SNAKE_CASE , return_offsets_mapping=SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(SCREAMING_SNAKE_CASE )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(SCREAMING_SNAKE_CASE ) + 1, 1 + len(SCREAMING_SNAKE_CASE ) + 1 + len(SCREAMING_SNAKE_CASE )) , )
def __UpperCAmelCase ( self :List[str] ) -> str:
'''simple docstring'''
# Test related to the breaking change introduced in transformers v4.17.0
# We need to check that an error in raised when the user try to load a previous version of the tokenizer.
with self.assertRaises(SCREAMING_SNAKE_CASE ) as context:
self.rust_tokenizer_class.from_pretrained("""robot-test/old-clip-tokenizer""" )
self.assertTrue(
context.exception.args[0].startswith(
"""The `backend_tokenizer` provided does not match the expected format.""" ) )
@require_ftfy
def __UpperCAmelCase ( self :int ) -> Dict:
'''simple docstring'''
super().test_tokenization_python_rust_equals()
def __UpperCAmelCase ( self :Optional[int] ) -> Optional[int]:
'''simple docstring'''
# CLIP always lower cases letters
pass
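# --- illustrative usage (editor's sketch, not part of the test file) ---
# Outside the test harness, the tokenizer under test round-trips text like this
# (the checkpoint name is the public CLIP checkpoint, used here only for
# illustration):
#
#     tok = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
#     ids = tok("lower newer").input_ids
#     text = tok.decode(ids, skip_special_tokens=True)  # -> "lower newer"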
| 276 |
'''simple docstring'''


class TrieNode:
    def __init__(self) -> None:
        self.nodes: dict[str, TrieNode] = {}  # Mapping from char to TrieNode
        self.is_leaf = False

    def insert_many(self, words: list[str]) -> None:
        """Insert a list of words into the trie."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a single word into the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                curr.nodes[char] = TrieNode()
            curr = curr.nodes[char]
        curr.is_leaf = True

    def find(self, word: str) -> bool:
        """Return True if the exact word is stored in the trie."""
        curr = self
        for char in word:
            if char not in curr.nodes:
                return False
            curr = curr.nodes[char]
        return curr.is_leaf

    def delete(self, word: str) -> None:
        """Remove a word from the trie, pruning nodes that become unused."""

        def _delete(curr: TrieNode, word: str, index: int) -> bool:
            if index == len(word):
                # If word does not exist
                if not curr.is_leaf:
                    return False
                curr.is_leaf = False
                return len(curr.nodes) == 0
            char = word[index]
            char_node = curr.nodes.get(char)
            # If char not in current trie node
            if not char_node:
                return False
            # Flag to check if node can be deleted
            delete_curr = _delete(char_node, word, index + 1)
            if delete_curr:
                del curr.nodes[char]
                return len(curr.nodes) == 0
            return delete_curr

        _delete(self, word, 0)


def print_words(node: TrieNode, word: str) -> None:
    if node.is_leaf:
        print(word, end=" ")
    for key, value in node.nodes.items():
        print_words(value, word + key)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = TrieNode()
    root.insert_many(words)
    # print_words(root, "")
    assert all(root.find(word) for word in words)
    assert root.find("banana")
    assert not root.find("bandanas")
    assert not root.find("apps")
    assert root.find("apple")
    assert root.find("all")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")
    return True


def print_results(msg: str, passes: bool) -> None:
    print(str(msg), "works!" if passes else "doesn't work :(")


def pytests() -> None:
    assert test_trie()


def main() -> None:
    print_results("Testing trie functionality", test_trie())


if __name__ == "__main__":
    main()
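# --- illustrative extension (editor's sketch, not part of the original file) ---
# A common use of a trie is prefix search; this hypothetical helper walks to the
# node for `prefix`, then reuses the same traversal pattern as `print_words` to
# collect every word stored under that subtree:
def words_with_prefix(root: TrieNode, prefix: str) -> list[str]:
    """Collect every stored word that starts with `prefix`."""
    curr = root
    for char in prefix:
        if char not in curr.nodes:
            return []  # no stored word shares this prefix
        curr = curr.nodes[char]
    results: list[str] = []

    def _collect(node: TrieNode, word: str) -> None:
        if node.is_leaf:
            results.append(word)
        for key, child in node.nodes.items():
            _collect(child, word + key)

    _collect(curr, prefix)
    # e.g. for a trie holding the test words above, words_with_prefix(root, "ban")
    # returns every word in the "ban" subtree: banana, bananas, band, bandana
    return results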
| 276 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parrameters
lowerCamelCase__ = (720, 1280) # Height, Width
lowerCamelCase__ = (0.4, 0.6) # if height or width lower than this scale, drop it.
lowerCamelCase__ = 1 / 100
lowerCamelCase__ = """"""
lowerCamelCase__ = """"""
lowerCamelCase__ = """"""
lowerCamelCase__ = 250
def lowerCAmelCase__ ( ) -> None:
lowerCAmelCase__ , lowerCAmelCase__ : Any = get_dataset(__lowerCamelCase , __lowerCamelCase )
for index in range(__lowerCamelCase ):
lowerCAmelCase__ : Dict = random.sample(range(len(__lowerCamelCase ) ) , 4 )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = update_image_and_anno(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , filter_scale=__lowerCamelCase , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCAmelCase__ : List[Any] = random_chars(32 )
lowerCAmelCase__ : Any = path.split(os.sep )[-1].rsplit('.' , 1 )[0]
lowerCAmelCase__ : Union[str, Any] = F'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(F'''{file_root}.jpg''' , __lowerCamelCase , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
lowerCAmelCase__ : List[Any] = []
for anno in new_annos:
lowerCAmelCase__ : List[Any] = anno[3] - anno[1]
lowerCAmelCase__ : Optional[int] = anno[4] - anno[2]
lowerCAmelCase__ : str = anno[1] + width / 2
lowerCAmelCase__ : Optional[int] = anno[2] + height / 2
lowerCAmelCase__ : Any = F'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(__lowerCamelCase )
with open(F'''{file_root}.txt''' , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> tuple[list, list]:
lowerCAmelCase__ : List[Any] = []
lowerCAmelCase__ : List[Any] = []
for label_file in glob.glob(os.path.join(__lowerCamelCase , '*.txt' ) ):
lowerCAmelCase__ : Any = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(__lowerCamelCase ) as in_file:
lowerCAmelCase__ : int = in_file.readlines()
lowerCAmelCase__ : List[Any] = os.path.join(__lowerCamelCase , F'''{label_name}.jpg''' )
lowerCAmelCase__ : List[Any] = []
for obj_list in obj_lists:
lowerCAmelCase__ : Optional[int] = obj_list.rstrip('\n' ).split(' ' )
lowerCAmelCase__ : int = float(obj[1] ) - float(obj[3] ) / 2
lowerCAmelCase__ : List[str] = float(obj[2] ) - float(obj[4] ) / 2
lowerCAmelCase__ : Dict = float(obj[1] ) + float(obj[3] ) / 2
lowerCAmelCase__ : Tuple = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(__lowerCamelCase )
labels.append(__lowerCamelCase )
return img_paths, labels
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.0 , ) -> tuple[list, list, str]:
lowerCAmelCase__ : Tuple = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowerCAmelCase__ : Optional[int] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCAmelCase__ : Optional[int] = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCAmelCase__ : int = int(scale_x * output_size[1] )
lowerCAmelCase__ : List[str] = int(scale_y * output_size[0] )
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : Tuple = []
for i, index in enumerate(__lowerCamelCase ):
lowerCAmelCase__ : List[str] = all_img_list[index]
path_list.append(__lowerCamelCase )
lowerCAmelCase__ : List[Any] = all_annos[index]
lowerCAmelCase__ : List[str] = cva.imread(__lowerCamelCase )
if i == 0: # top-left
lowerCAmelCase__ : Tuple = cva.resize(__lowerCamelCase , (divid_point_x, divid_point_y) )
lowerCAmelCase__ : Tuple = img
for bbox in img_annos:
lowerCAmelCase__ : str = bbox[1] * scale_x
lowerCAmelCase__ : int = bbox[2] * scale_y
lowerCAmelCase__ : Dict = bbox[3] * scale_x
lowerCAmelCase__ : Dict = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowerCAmelCase__ : Union[str, Any] = cva.resize(__lowerCamelCase , (output_size[1] - divid_point_x, divid_point_y) )
lowerCAmelCase__ : Union[str, Any] = img
for bbox in img_annos:
lowerCAmelCase__ : Optional[int] = scale_x + bbox[1] * (1 - scale_x)
lowerCAmelCase__ : List[Any] = bbox[2] * scale_y
lowerCAmelCase__ : Optional[int] = scale_x + bbox[3] * (1 - scale_x)
lowerCAmelCase__ : List[str] = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowerCAmelCase__ : Union[str, Any] = cva.resize(__lowerCamelCase , (divid_point_x, output_size[0] - divid_point_y) )
lowerCAmelCase__ : List[str] = img
for bbox in img_annos:
lowerCAmelCase__ : Union[str, Any] = bbox[1] * scale_x
lowerCAmelCase__ : Tuple = scale_y + bbox[2] * (1 - scale_y)
lowerCAmelCase__ : List[Any] = bbox[3] * scale_x
lowerCAmelCase__ : Optional[int] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowerCAmelCase__ : List[Any] = cva.resize(
__lowerCamelCase , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowerCAmelCase__ : Optional[Any] = img
for bbox in img_annos:
lowerCAmelCase__ : str = scale_x + bbox[1] * (1 - scale_x)
lowerCAmelCase__ : Optional[int] = scale_y + bbox[2] * (1 - scale_y)
lowerCAmelCase__ : List[Any] = scale_x + bbox[3] * (1 - scale_x)
lowerCAmelCase__ : Union[str, Any] = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
# Remove bounding box small than scale of filter
if filter_scale > 0:
lowerCAmelCase__ : int = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
assert number_char > 1, "The number of character should greater than 1"
lowerCAmelCase__ : Optional[int] = ascii_lowercase + digits
return "".join(random.choice(__lowerCamelCase ) for _ in range(__lowerCamelCase ) )
if __name__ == "__main__":
main()
print("""DONE โ
""") | 368 |
import gc
import unittest
import numpy as np
import torch
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = DanceDiffusionPipeline
lowercase = UNCONDITIONAL_AUDIO_GENERATION_PARAMS
lowercase = PipelineTesterMixin.required_optional_params - {
'callback',
'latents',
'callback_steps',
'output_type',
'num_images_per_prompt',
}
lowercase = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS
lowercase = False
lowercase = False
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = UNetaDModel(
block_out_channels=(32, 32, 64) , extra_in_channels=16 , sample_size=512 , sample_rate=16_000 , in_channels=2 , out_channels=2 , flip_sin_to_cos=a , use_timestep_embedding=a , time_embedding_type='fourier' , mid_block_type='UNetMidBlock1D' , down_block_types=('DownBlock1DNoSkip', 'DownBlock1D', 'AttnDownBlock1D') , up_block_types=('AttnUpBlock1D', 'UpBlock1D', 'UpBlock1DNoSkip') , )
lowerCAmelCase__ : Tuple = IPNDMScheduler()
lowerCAmelCase__ : str = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : int , a : Dict , a : List[str]=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
else:
lowerCAmelCase__ : Optional[Any] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Optional[Any] = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 4,
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : int = self.get_dummy_components()
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline(**a )
lowerCAmelCase__ : Any = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_dummy_inputs(a )
lowerCAmelCase__ : List[Any] = pipe(**a )
lowerCAmelCase__ : List[str] = output.audios
lowerCAmelCase__ : Optional[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, components["unet"].sample_size)
lowerCAmelCase__ : List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
@skip_mps
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return super().test_save_load_local()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
@skip_mps
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return super().test_save_load_optional_components()
@skip_mps
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return super().test_attention_slicing_forward_pass()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = torch_device
lowerCAmelCase__ : List[str] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' )
lowerCAmelCase__ : List[str] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[int] = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : int = output.audios
lowerCAmelCase__ : List[Any] = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : Dict = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] )
assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = torch_device
lowerCAmelCase__ : List[Any] = DanceDiffusionPipeline.from_pretrained('harmonai/maestro-150k' , torch_dtype=torch.floataa )
lowerCAmelCase__ : Optional[int] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = torch.manual_seed(0 )
lowerCAmelCase__ : Optional[int] = pipe(generator=a , num_inference_steps=100 , audio_length_in_s=4.0_9_6 )
lowerCAmelCase__ : str = output.audios
lowerCAmelCase__ : Tuple = audio[0, -3:, -3:]
assert audio.shape == (1, 2, pipe.unet.sample_size)
lowerCAmelCase__ : int = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] )
        assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
| 307 | 0 |
class Graph:
    """Data structure to store graphs (based on adjacency lists)."""

    def __init__(self) -> None:
        self.num_vertices = 0
        self.num_edges = 0
        self.adjacency = {}

    def add_vertex(self, vertex):
        """Adds a vertex to the graph."""
        if vertex not in self.adjacency:
            self.adjacency[vertex] = {}
            self.num_vertices += 1

    def add_edge(self, head, tail, weight):
        """Adds an undirected, weighted edge to the graph."""
        self.add_vertex(head)
        self.add_vertex(tail)
        if head == tail:
            return
        self.adjacency[head][tail] = weight
        self.adjacency[tail][head] = weight

    def distinct_weight(self):
        """Boruvka's algorithm assumes distinct edge weights; make them distinct."""
        edges = self.get_edges()
        for edge in edges:
            head, tail, weight = edge
            edges.remove((tail, head, weight))
        for i in range(len(edges)):
            edges[i] = list(edges[i])

        edges.sort(key=lambda e: e[2])
        for i in range(len(edges) - 1):
            if edges[i][2] >= edges[i + 1][2]:
                edges[i + 1][2] = edges[i][2] + 1
        for edge in edges:
            head, tail, weight = edge
            self.adjacency[head][tail] = weight
            self.adjacency[tail][head] = weight

    def __str__(self) -> str:
        string = ""
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                weight = self.adjacency[head][tail]
                string += f"{head} -> {tail} == {weight}\n"
        return string.rstrip("\n")

    def get_edges(self):
        """Returns all edges in the graph as (tail, head, weight) tuples."""
        output = []
        for tail in self.adjacency:
            for head in self.adjacency[tail]:
                output.append((tail, head, self.adjacency[head][tail]))
        return output

    def get_vertices(self):
        """Returns all vertices in the graph."""
        return self.adjacency.keys()

    @staticmethod
    def build(vertices=None, edges=None):
        """Builds a graph from the given set of vertices and edges."""
        g = Graph()
        if vertices is None:
            vertices = []
        if edges is None:
            edges = []
        for vertex in vertices:
            g.add_vertex(vertex)
        for edge in edges:
            g.add_edge(*edge)
        return g

    class UnionFind:
        """Disjoint-set (union-find) helper used by Boruvka's algorithm."""

        def __init__(self) -> None:
            self.parent = {}
            self.rank = {}

        def __len__(self):
            return len(self.parent)

        def make_set(self, item):
            if item in self.parent:
                return self.find(item)

            self.parent[item] = item
            self.rank[item] = 0
            return item

        def find(self, item):
            if item not in self.parent:
                return self.make_set(item)
            if item != self.parent[item]:
                self.parent[item] = self.find(self.parent[item])
            return self.parent[item]

        def union(self, item1, item2):
            root1 = self.find(item1)
            root2 = self.find(item2)

            if root1 == root2:
                return root1

            if self.rank[root1] > self.rank[root2]:
                self.parent[root2] = root1
                return root1

            if self.rank[root1] < self.rank[root2]:
                self.parent[root1] = root2
                return root2

            if self.rank[root1] == self.rank[root2]:
                self.rank[root1] += 1
                self.parent[root2] = root1
                return root1
            return None

    @staticmethod
    def boruvka(graph):
        """Implementation of Boruvka's algorithm; returns a minimum spanning tree."""
        num_components = graph.num_vertices

        union_find = Graph.UnionFind()
        mst_edges = []
        while num_components > 1:
            cheap_edge = {}
            for vertex in graph.get_vertices():
                cheap_edge[vertex] = -1

            edges = graph.get_edges()
            for edge in edges:
                head, tail, weight = edge
                edges.remove((tail, head, weight))
            for edge in edges:
                head, tail, weight = edge
                set1 = union_find.find(head)
                set2 = union_find.find(tail)
                if set1 != set2:
                    if cheap_edge[set1] == -1 or cheap_edge[set1][2] > weight:
                        cheap_edge[set1] = [head, tail, weight]

                    if cheap_edge[set2] == -1 or cheap_edge[set2][2] > weight:
                        cheap_edge[set2] = [head, tail, weight]
            for vertex in cheap_edge:
                if cheap_edge[vertex] != -1:
                    head, tail, weight = cheap_edge[vertex]
                    if union_find.find(head) != union_find.find(tail):
                        union_find.union(head, tail)
                        mst_edges.append(cheap_edge[vertex])
                        num_components = num_components - 1
        mst = Graph.build(edges=mst_edges)
        return mst
| 210 |
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    """Interleave several datasets (sources) into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    """Concatenate several datasets into a single dataset."""
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 210 | 1 |
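# --- illustrative usage of the combine helpers above (editor's sketch) ---
# from datasets import Dataset
# d1 = Dataset.from_dict({"a": [0, 1, 2]})
# d2 = Dataset.from_dict({"a": [10, 11, 12]})
# mixed = interleave_datasets([d1, d2], probabilities=[0.5, 0.5], seed=42)
# both = concatenate_datasets([d1, d2])  # len(both) == 6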
'''simple docstring'''


def mf_knapsack(i, wt, val, j):
    """Memory-function (memoized) knapsack; fills the global table f on demand."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val = mf_knapsack(i - 1, wt, val, j)
        else:
            val = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val
    return f[i][j]


def knapsack(w, wt, val, n):
    """Bottom-up 0/1 knapsack; returns the optimal value and the full dp table."""
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w_], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    """Solve the knapsack and also reconstruct one optimal subset of item indices."""
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
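# --- illustrative variant (editor's sketch, not part of the original file) ---
# The 2-D table above can be reduced to O(w) memory: row i only depends on
# row i - 1, so a single array works if capacities are visited in reverse
# (the reverse order guarantees each item is counted at most once):
def knapsack_1d(w: int, wt: list, val: list, n: int) -> int:
    dp = [0] * (w + 1)
    for i in range(n):
        for cap in range(w, wt[i] - 1, -1):  # reverse so item i is used once
            dp[cap] = max(dp[cap], dp[cap - wt[i]] + val[i])
    return dp[w]

# e.g. knapsack_1d(6, [4, 3, 2, 3], [3, 2, 4, 4], 4) == 8, matching `knapsack` above.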
| 101 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
__lowerCamelCase = logging.getLogger(__name__)
class A__ ( _snake_case ):
lowercase = "summarization"
lowercase = ["loss"]
lowercase = ROUGE_KEYS
lowercase = "rouge2"
def __init__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
'''simple docstring'''
if hparams.sortish_sampler and hparams.gpus > 1:
A_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""" )
if hparams.sortish_sampler:
raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""" )
super().__init__(UpperCamelCase__ , num_labels=UpperCamelCase__ , mode=self.mode , **UpperCamelCase__ )
use_task_specific_params(self.model , """summarization""" )
save_git_info(self.hparams.output_dir )
A_ = Path(self.output_dir ) / """metrics.json"""
A_ = Path(self.output_dir ) / """hparams.pkl"""
pickle_save(self.hparams , self.hparams_save_path )
A_ = 0
A_ = defaultdict(UpperCamelCase__ )
A_ = self.config.model_type
A_ = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
A_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
A_ = {
"""train""": self.hparams.n_train,
"""val""": self.hparams.n_val,
"""test""": self.hparams.n_test,
}
A_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
A_ = {
"""train""": self.hparams.max_target_length,
"""val""": self.hparams.val_max_target_length,
"""test""": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
A_ = get_git_info()["""repo_sha"""]
A_ = hparams.num_workers
A_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , UpperCamelCase__ ):
A_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
A_ = self.decoder_start_token_id
A_ = (
SeqaSeqDataset if hasattr(self.tokenizer , """prepare_seq2seq_batch""" ) else LegacySeqaSeqDataset
)
A_ = False
A_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
A_ = self.hparams.eval_max_gen_length
else:
A_ = self.model.config.max_length
A_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def snake_case_ ( self , UpperCamelCase__ ) -> Dict[str, List[str]]:
'''simple docstring'''
A_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if """mask""" not in k else v.shape for k, v in batch.items()
}
save_json(UpperCamelCase__ , Path(self.output_dir ) / """text_batch.json""" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / """tok_batch.json""" )
A_ = True
return readable_batch
def snake_case_ ( self , UpperCamelCase__ , **UpperCamelCase__ ) -> str:
'''simple docstring'''
return self.model(UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
A_ = self.tokenizer.batch_decode(
UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ , clean_up_tokenization_spaces=UpperCamelCase__ )
return lmap(str.strip , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
A_ = self.tokenizer.pad_token_id
A_ , A_ = batch["""input_ids"""], batch["""attention_mask"""]
A_ = batch["""labels"""]
if isinstance(self.model , UpperCamelCase__ ):
A_ = self.model._shift_right(UpperCamelCase__ )
else:
A_ = shift_tokens_right(UpperCamelCase__ , UpperCamelCase__ )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
A_ = decoder_input_ids
self.save_readable_batch(UpperCamelCase__ )
A_ = self(UpperCamelCase__ , attention_mask=UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ , use_cache=UpperCamelCase__ )
A_ = outputs["""logits"""]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
A_ = nn.CrossEntropyLoss(ignore_index=UpperCamelCase__ )
assert lm_logits.shape[-1] == self.vocab_size
A_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
A_ = nn.functional.log_softmax(UpperCamelCase__ , dim=-1 )
A_ , A_ = label_smoothed_nll_loss(
UpperCamelCase__ , UpperCamelCase__ , self.hparams.label_smoothing , ignore_index=UpperCamelCase__ )
return (loss,)
@property
def snake_case_ ( self ) -> int:
'''simple docstring'''
return self.tokenizer.pad_token_id
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
A_ = self._step(UpperCamelCase__ )
A_ = dict(zip(self.loss_names , UpperCamelCase__ ) )
# tokens per batch
A_ = batch["""input_ids"""].ne(self.pad ).sum() + batch["""labels"""].ne(self.pad ).sum()
A_ = batch["""input_ids"""].shape[0]
A_ = batch["""input_ids"""].eq(self.pad ).sum()
A_ = batch["""input_ids"""].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return self._generative_step(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__="val" ) -> Dict:
'''simple docstring'''
self.step_count += 1
A_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
A_ = losses["""loss"""]
A_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
}
A_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
A_ = torch.tensor(UpperCamelCase__ ).type_as(UpperCamelCase__ )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(UpperCamelCase__ )
A_ = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
A_ = self.step_count
self.metrics[prefix].append(UpperCamelCase__ ) # callback writes this to self.metrics_save_path
A_ = flatten_list([x["""preds"""] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return calculate_rouge(UpperCamelCase__ , UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> dict:
'''simple docstring'''
A_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
A_ = self.model.generate(
batch["""input_ids"""] , attention_mask=batch["""attention_mask"""] , use_cache=UpperCamelCase__ , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
A_ = (time.time() - ta) / batch["""input_ids"""].shape[0]
A_ = self.ids_to_clean_text(UpperCamelCase__ )
A_ = self.ids_to_clean_text(batch["""labels"""] )
A_ = self._step(UpperCamelCase__ )
A_ = dict(zip(self.loss_names , UpperCamelCase__ ) )
A_ = self.calc_generative_metrics(UpperCamelCase__ , UpperCamelCase__ )
A_ = np.mean(lmap(UpperCamelCase__ , UpperCamelCase__ ) )
base_metrics.update(gen_time=UpperCamelCase__ , gen_len=UpperCamelCase__ , preds=UpperCamelCase__ , target=UpperCamelCase__ , **UpperCamelCase__ )
return base_metrics
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self._generative_step(UpperCamelCase__ )
def snake_case_ ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
return self.validation_epoch_end(UpperCamelCase__ , prefix="""test""" )
def snake_case_ ( self , UpperCamelCase__ ) -> SeqaSeqDataset:
'''simple docstring'''
A_ = self.n_obs[type_path]
A_ = self.target_lens[type_path]
A_ = self.dataset_class(
self.tokenizer , type_path=UpperCamelCase__ , n_obs=UpperCamelCase__ , max_target_length=UpperCamelCase__ , **self.dataset_kwargs , )
return dataset
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader:
'''simple docstring'''
A_ = self.get_dataset(UpperCamelCase__ )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
A_ = dataset.make_sortish_sampler(UpperCamelCase__ , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
A_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=dataset.collate_fn , shuffle=UpperCamelCase__ , num_workers=self.num_workers , sampler=UpperCamelCase__ , )
def snake_case_ ( self ) -> DataLoader:
'''simple docstring'''
A_ = self.get_dataloader("""train""" , batch_size=self.hparams.train_batch_size , shuffle=UpperCamelCase__ )
return dataloader
def snake_case_ ( self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader("""val""" , batch_size=self.hparams.eval_batch_size )
def snake_case_ ( self ) -> DataLoader:
'''simple docstring'''
return self.get_dataloader("""test""" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def snake_case_ ( UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
add_generic_args(UpperCamelCase__ , UpperCamelCase__ )
parser.add_argument(
"""--max_source_length""" , default=1024 , type=UpperCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--max_target_length""" , default=56 , type=UpperCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--val_max_target_length""" , default=142 , type=UpperCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--test_max_target_length""" , default=142 , type=UpperCamelCase__ , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument("""--freeze_encoder""" , action="""store_true""" )
parser.add_argument("""--freeze_embeds""" , action="""store_true""" )
parser.add_argument("""--sortish_sampler""" , action="""store_true""" , default=UpperCamelCase__ )
parser.add_argument("""--overwrite_output_dir""" , action="""store_true""" , default=UpperCamelCase__ )
parser.add_argument("""--max_tokens_per_batch""" , type=UpperCamelCase__ , default=UpperCamelCase__ )
parser.add_argument("""--logger_name""" , type=UpperCamelCase__ , choices=["""default""", """wandb""", """wandb_shared"""] , default="""default""" )
parser.add_argument("""--n_train""" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_val""" , type=UpperCamelCase__ , default=500 , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--n_test""" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument(
"""--task""" , type=UpperCamelCase__ , default="""summarization""" , required=UpperCamelCase__ , help="""# examples. -1 means use all.""" )
parser.add_argument("""--label_smoothing""" , type=UpperCamelCase__ , default=0.0 , required=UpperCamelCase__ )
parser.add_argument("""--src_lang""" , type=UpperCamelCase__ , default="""""" , required=UpperCamelCase__ )
parser.add_argument("""--tgt_lang""" , type=UpperCamelCase__ , default="""""" , required=UpperCamelCase__ )
parser.add_argument("""--eval_beams""" , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ )
parser.add_argument(
"""--val_metric""" , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ , choices=["""bleu""", """rouge2""", """loss""", None] )
parser.add_argument("""--eval_max_gen_length""" , type=UpperCamelCase__ , default=UpperCamelCase__ , help="""never generate more than n tokens""" )
parser.add_argument("""--save_top_k""" , type=UpperCamelCase__ , default=1 , required=UpperCamelCase__ , help="""How many checkpoints to save""" )
parser.add_argument(
"""--early_stopping_patience""" , type=UpperCamelCase__ , default=-1 , required=UpperCamelCase__ , help=(
"""-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
""" val_check_interval will effect it."""
) , )
return parser
class A__ ( _snake_case ):
lowercase = "translation"
lowercase = ["loss"]
lowercase = ["bleu"]
lowercase = "bleu"
def __init__( self , UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
super().__init__(UpperCamelCase__ , **UpperCamelCase__ )
A_ = hparams.src_lang
A_ = hparams.tgt_lang
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> dict:
'''simple docstring'''
return calculate_bleu(UpperCamelCase__ , UpperCamelCase__ )
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__=None ) -> SummarizationModule:
Path(args.output_dir ).mkdir(exist_ok=UpperCAmelCase__ )
check_output_dir(UpperCAmelCase__, expected_items=3 )
if model is None:
if "summarization" in args.task:
A_ = SummarizationModule(UpperCAmelCase__ )
else:
A_ = TranslationModule(UpperCAmelCase__ )
A_ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("""/tmp""" )
or str(args.output_dir ).startswith("""/var""" )
):
A_ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
A_ = os.environ.get("""WANDB_PROJECT""", UpperCAmelCase__ )
A_ = WandbLogger(name=model.output_dir.name, project=UpperCAmelCase__ )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
A_ = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
A_ = get_early_stopping_callback(model.val_metric, args.early_stopping_patience )
else:
A_ = False
A_ = args.val_metric == """loss"""
A_ = generic_train(
UpperCAmelCase__, UpperCAmelCase__, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
args.output_dir, model.val_metric, args.save_top_k, UpperCAmelCase__ ), early_stopping_callback=UpperCAmelCase__, logger=UpperCAmelCase__, )
pickle_save(model.hparams, model.output_dir / """hparams.pkl""" )
if not args.do_predict:
return model
A_ = """"""
A_ = sorted(glob.glob(os.path.join(args.output_dir, """*.ckpt""" ), recursive=UpperCAmelCase__ ) )
if checkpoints:
A_ = checkpoints[-1]
A_ = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
__lowerCamelCase = pl.Trainer.add_argparse_args(parser)
__lowerCamelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
__lowerCamelCase = parser.parse_args()
main(args)
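# --- illustrative invocation (editor's sketch, not part of the original file) ---
# The task/length flags come from `add_model_specific_args` above; the
# --model_name_or_path / --data_dir / --output_dir flags are assumed to be
# registered by the (not shown) `add_generic_args` / `BaseTransformer` helpers:
#
#   python finetune.py \
#       --model_name_or_path t5-small --data_dir ./cnn_dm --output_dir ./out \
#       --task summarization --max_source_length 1024 --max_target_length 56 \
#       --n_train 1000 --label_smoothing 0.1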
| 101 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : Optional[int] = """gptsan-japanese"""
a_ : Any = [
"""past_key_values""",
]
a_ : Any = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[Any] , a_ : Optional[int]=3_60_00 , a_ : List[str]=12_80 , a_ : int=10_24 , a_ : List[Any]=81_92 , a_ : Tuple=40_96 , a_ : Dict=1_28 , a_ : Union[str, Any]=10 , a_ : str=0 , a_ : Optional[int]=16 , a_ : Tuple=16 , a_ : List[Any]=1_28 , a_ : List[Any]=0.0 , a_ : str=1e-5 , a_ : Optional[Any]=False , a_ : Union[str, Any]=0.0 , a_ : Optional[int]="float32" , a_ : List[Any]=False , a_ : str=False , a_ : Optional[int]=False , a_ : Any=0.002 , a_ : Optional[Any]=False , a_ : Optional[Any]=True , a_ : Optional[int]=3_59_98 , a_ : Dict=3_59_95 , a_ : int=3_59_99 , **a_ : Any , ):
lowerCAmelCase_ : Optional[int] = vocab_size
lowerCAmelCase_ : str = max_position_embeddings
lowerCAmelCase_ : Any = d_model
lowerCAmelCase_ : Tuple = d_ff
lowerCAmelCase_ : Any = d_ext
lowerCAmelCase_ : Union[str, Any] = d_spout
lowerCAmelCase_ : Tuple = num_switch_layers
lowerCAmelCase_ : int = num_ext_layers
lowerCAmelCase_ : str = num_switch_layers + num_ext_layers
lowerCAmelCase_ : List[str] = num_heads
lowerCAmelCase_ : Optional[int] = num_experts
lowerCAmelCase_ : str = expert_capacity
lowerCAmelCase_ : Optional[int] = dropout_rate
lowerCAmelCase_ : int = layer_norm_epsilon
lowerCAmelCase_ : int = router_bias
lowerCAmelCase_ : Any = router_jitter_noise
lowerCAmelCase_ : Tuple = router_dtype
lowerCAmelCase_ : Union[str, Any] = router_ignore_padding_tokens
lowerCAmelCase_ : Optional[int] = output_hidden_states
lowerCAmelCase_ : Optional[int] = output_attentions
lowerCAmelCase_ : Any = initializer_factor
lowerCAmelCase_ : Dict = output_router_logits
lowerCAmelCase_ : Optional[int] = use_cache
super().__init__(
separator_token_id=a_ , pad_token_id=a_ , eos_token_id=a_ , **a_ , )
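# --- illustrative usage (editor's sketch, not part of the original file) ---
# config = GPTSanJapaneseConfig()                     # defaults from the signature
# assert config.num_layers == 10                      # num_switch_layers + num_ext_layers
# tiny = GPTSanJapaneseConfig(d_model=128, num_switch_layers=2, num_ext_layers=0)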
| 241 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
lowercase__ = get_logger()
lowercase__ = None
class __lowerCamelCase ( TensorFormatter[Mapping, """jax.Array""", Mapping] ):
'''simple docstring'''
def __init__( self : Optional[Any] , a_ : Union[str, Any]=None , a_ : Optional[Any]=None , **a_ : Tuple ):
super().__init__(features=a_ )
import jax
from jaxlib.xla_client import Device
if isinstance(a_ , a_ ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(a_ )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
lowerCAmelCase_ : List[Any] = device if isinstance(a_ , a_ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowerCAmelCase_ : Dict = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
lowerCAmelCase_ : Tuple = str(jax.devices()[0] )
lowerCAmelCase_ : Dict = jnp_array_kwargs
@staticmethod
def lowerCamelCase ( ):
import jax
return {str(a_ ): device for device in jax.devices()}
def lowerCamelCase ( self : Tuple , a_ : Dict ):
import jax
import jax.numpy as jnp
if isinstance(a_ , a_ ) and column:
if all(
isinstance(a_ , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(a_ , axis=0 )
return column
def lowerCamelCase ( self : Tuple , a_ : Tuple ):
import jax
import jax.numpy as jnp
if isinstance(a_ , (str, bytes, type(a_ )) ):
return value
elif isinstance(a_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
lowerCAmelCase_ : Any = {}
if isinstance(a_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                lowerCAmelCase_ : List[Any] = {"dtype": jnp.int64}
            else:
                lowerCAmelCase_ : Tuple = {"dtype": jnp.int32}
        elif isinstance(a_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            lowerCAmelCase_ : Optional[int] = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(a_ , PIL.Image.Image ):
lowerCAmelCase_ : List[str] = np.asarray(a_ )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
lowerCAmelCase_ : Dict = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(a_ , **{**default_dtype, **self.jnp_array_kwargs} )
def lowerCamelCase ( self : Any , a_ : List[str] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(a_ , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(a_ , "__array__" ) and not isinstance(a_ , jax.Array ):
lowerCAmelCase_ : str = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(a_ , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(a_ , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(a_ )
def lowerCamelCase ( self : Dict , a_ : dict ):
return map_nested(self._recursive_tensorize , a_ , map_list=a_ )
def lowerCamelCase ( self : List[str] , a_ : pa.Table ):
lowerCAmelCase_ : Tuple = self.numpy_arrow_extractor().extract_row(a_ )
lowerCAmelCase_ : Union[str, Any] = self.python_features_decoder.decode_row(a_ )
return self.recursive_tensorize(a_ )
def lowerCamelCase ( self : Any , a_ : pa.Table ):
lowerCAmelCase_ : Dict = self.numpy_arrow_extractor().extract_column(a_ )
lowerCAmelCase_ : Optional[int] = self.python_features_decoder.decode_column(a_ , pa_table.column_names[0] )
lowerCAmelCase_ : List[str] = self.recursive_tensorize(a_ )
lowerCAmelCase_ : Optional[Any] = self._consolidate(a_ )
return column
def lowerCamelCase ( self : Tuple , a_ : pa.Table ):
lowerCAmelCase_ : Tuple = self.numpy_arrow_extractor().extract_batch(a_ )
lowerCAmelCase_ : Tuple = self.python_features_decoder.decode_batch(a_ )
lowerCAmelCase_ : List[str] = self.recursive_tensorize(a_ )
for column_name in batch:
lowerCAmelCase_ : Tuple = self._consolidate(batch[column_name] )
return batch
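# --- Illustrative sketch, not part of the original file ---
# The dtype-selection rule used in `_tensorize` above, in isolation: integer
# inputs follow the `jax_enable_x64` flag, floats default to 32 bits. The
# function name and structure are chosen here for illustration only; requires jax.
def _default_jnp_dtype_kwargs(value):
    import jax
    import jax.numpy as jnp
    dtype = np.asarray(value).dtype
    if np.issubdtype(dtype, np.integer):
        # 64-bit ints only when double precision is enabled in the JAX config
        return {"dtype": jnp.int64 if jax.config.jax_enable_x64 else jnp.int32}
    if np.issubdtype(dtype, np.floating):
        return {"dtype": jnp.float32}
    return {}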
| 241 | 1 |
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def a__( lowerCAmelCase ) -> List[str]:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def a__( lowerCAmelCase , lowerCAmelCase ) -> Any:
UpperCAmelCase__ : Optional[Any] = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
UpperCAmelCase__ : Dict = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""" )
UpperCAmelCase__ : Dict = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""" )
UpperCAmelCase__ : Optional[int] = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""" )
UpperCAmelCase__ : Optional[Any] = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""" )
UpperCAmelCase__ : str = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""" )
UpperCAmelCase__ : int = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""" )
UpperCAmelCase__ : Any = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""" )
UpperCAmelCase__ : Tuple = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""" )
UpperCAmelCase__ : Dict = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""" )
UpperCAmelCase__ : List[Any] = key.replace("""image_encoder.module""" , """flava.image_model""" )
UpperCAmelCase__ : str = key.replace("""text_encoder.module""" , """flava.text_model""" )
UpperCAmelCase__ : Any = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""" )
UpperCAmelCase__ : str = key.replace("""mm_encoder.module""" , """flava.multimodal_model""" )
UpperCAmelCase__ : List[Any] = key.replace("""text_projection""" , """flava.text_projection""" )
UpperCAmelCase__ : int = key.replace("""image_projection""" , """flava.image_projection""" )
UpperCAmelCase__ : List[Any] = value.float()
for key, value in codebook_state_dict.items():
UpperCAmelCase__ : Optional[Any] = value
return upgrade
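# --- Illustrative sketch, not part of the original file ---
# The renaming above is meant as a *chain* of str.replace calls applied to the
# same key. A toy version (the key prefixes below are real entries from the
# mapping above; the helper name is hypothetical):
def _rename_key_example(key: str) -> str:
    key = key.replace("heads.cmd.itm_head.cls", "itm_head")
    key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
    return key
assert _rename_key_example("heads.cmd.itm_head.cls.bias") == "itm_head.bias"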
@torch.no_grad()
def a__( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None ) -> Dict:
if config_path is not None:
UpperCAmelCase__ : List[str] = FlavaConfig.from_pretrained(lowerCAmelCase )
else:
UpperCAmelCase__ : List[str] = FlavaConfig()
UpperCAmelCase__ : Optional[int] = FlavaForPreTraining(lowerCAmelCase ).eval()
UpperCAmelCase__ : List[str] = convert_dalle_checkpoint(lowerCAmelCase , lowerCAmelCase , save_checkpoint=lowerCAmelCase )
if os.path.exists(lowerCAmelCase ):
UpperCAmelCase__ : str = torch.load(lowerCAmelCase , map_location="""cpu""" )
else:
UpperCAmelCase__ : str = torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location="""cpu""" )
UpperCAmelCase__ : str = upgrade_state_dict(lowerCAmelCase , lowerCAmelCase )
hf_model.load_state_dict(lowerCAmelCase )
UpperCAmelCase__ : Union[str, Any] = hf_model.state_dict()
UpperCAmelCase__ : Any = count_parameters(lowerCAmelCase )
UpperCAmelCase__ : Any = count_parameters(lowerCAmelCase ) + count_parameters(lowerCAmelCase )
assert torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1E-3 )
hf_model.save_pretrained(lowerCAmelCase )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
_A = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 366 |
"""simple docstring"""
from math import factorial, radians
def sin(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin of an angle in degrees with a truncated Taylor series."""
    # Simplify the angle to be between 360 and -360 degrees.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees)
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result, rounded_values_count)
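# --- Illustrative check, not part of the original file ---
# The truncated series should track math.sin closely; the helper name below
# is hypothetical.
def _sin_reference_check(angle_in_degrees: float = 30.0) -> bool:
    from math import sin as math_sin
    return abs(sin(angle_in_degrees) - math_sin(radians(angle_in_degrees))) < 1e-9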
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 166 | 0 |
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
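# --- Illustrative sketch, not part of the original file ---
# An equivalent formulation using math.prod over a sliding window; the helper
# name is hypothetical.
def _solution_alt(n: str = N) -> int:
    from math import prod
    return max(prod(int(digit) for digit in n[i : i + 13]) for i in range(len(n) - 12))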
if __name__ == "__main__":
print(F'{solution() = }')
| 101 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'MIT/ast-finetuned-audioset-10-10-0.4593': (
'https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json'
),
}
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="audio-spectrogram-transformer"
def __init__( self , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=16 , UpperCamelCase_=True , UpperCamelCase_=10 , UpperCamelCase_=10 , UpperCamelCase_=10_24 , UpperCamelCase_=1_28 , **UpperCamelCase_ , ) -> Optional[int]:
super().__init__(**UpperCamelCase_ )
__lowercase : Optional[Any] = hidden_size
__lowercase : List[str] = num_hidden_layers
__lowercase : List[str] = num_attention_heads
__lowercase : Dict = intermediate_size
__lowercase : List[str] = hidden_act
__lowercase : Union[str, Any] = hidden_dropout_prob
__lowercase : Optional[Any] = attention_probs_dropout_prob
__lowercase : Dict = initializer_range
__lowercase : Optional[int] = layer_norm_eps
__lowercase : Optional[int] = patch_size
__lowercase : List[str] = qkv_bias
__lowercase : Union[str, Any] = frequency_stride
__lowercase : List[Any] = time_stride
__lowercase : Tuple = max_length
__lowercase : int = num_mel_bins
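# --- Illustrative sketch, not part of the original file ---
# How these settings translate into a patch grid (assumption: the standard AST
# formula, where the spectrogram is tiled with 16x16 patches at the given strides):
def _ast_num_patches(num_mel_bins=128, max_length=1024, patch_size=16,
                     frequency_stride=10, time_stride=10) -> int:
    frequency_out = (num_mel_bins - patch_size) // frequency_stride + 1  # 12
    time_out = (max_length - patch_size) // time_stride + 1  # 101
    return frequency_out * time_out  # 1212 patches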
| 249 | 0 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
a_ : str = logging.get_logger(__name__)
a_ : List[Any] = {'vocab_file': 'spiece.model'}
a_ : Union[str, Any] = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
a_ : List[Any] = {
'albert-base-v1': 5_12,
'albert-large-v1': 5_12,
'albert-xlarge-v1': 5_12,
'albert-xxlarge-v1': 5_12,
'albert-base-v2': 5_12,
'albert-large-v2': 5_12,
'albert-xlarge-v2': 5_12,
'albert-xxlarge-v2': 5_12,
}
a_ : Any = '▁'
class _snake_case ( A__ ):
_lowercase : Union[str, Any] = VOCAB_FILES_NAMES
_lowercase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_lowercase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , a , a=True , a=True , a=False , a="[CLS]" , a="[SEP]" , a="<unk>" , a="[SEP]" , a="<pad>" , a="[CLS]" , a="[MASK]" , a = None , **a , ) -> None:
        # The mask token behaves like a normal word, i.e. it includes the space before it and
        # is kept in the raw text, so there should be a match in a non-normalized sentence.
SCREAMING_SNAKE_CASE = (
AddedToken(a , lstrip=a , rstrip=a , normalized=a)
if isinstance(a , a)
else mask_token
)
SCREAMING_SNAKE_CASE = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , )
SCREAMING_SNAKE_CASE = do_lower_case
SCREAMING_SNAKE_CASE = remove_space
SCREAMING_SNAKE_CASE = keep_accents
SCREAMING_SNAKE_CASE = vocab_file
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(a)
@property
def SCREAMING_SNAKE_CASE__ ( self) -> Tuple:
return len(self.sp_model)
def SCREAMING_SNAKE_CASE__ ( self) -> str:
SCREAMING_SNAKE_CASE = {self.convert_ids_to_tokens(a): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self) -> Optional[int]:
SCREAMING_SNAKE_CASE = self.__dict__.copy()
SCREAMING_SNAKE_CASE = None
return state
def __setstate__( self , a) -> Dict:
SCREAMING_SNAKE_CASE = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
SCREAMING_SNAKE_CASE = {}
SCREAMING_SNAKE_CASE = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[Any]:
if self.remove_space:
SCREAMING_SNAKE_CASE = ' '.join(inputs.strip().split())
else:
SCREAMING_SNAKE_CASE = inputs
SCREAMING_SNAKE_CASE = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
SCREAMING_SNAKE_CASE = unicodedata.normalize('NFKD' , a)
SCREAMING_SNAKE_CASE = ''.join([c for c in outputs if not unicodedata.combining(a)])
if self.do_lower_case:
SCREAMING_SNAKE_CASE = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE__ ( self , a) -> List[str]:
SCREAMING_SNAKE_CASE = self.preprocess_text(a)
SCREAMING_SNAKE_CASE = self.sp_model.encode(a , out_type=a)
SCREAMING_SNAKE_CASE = []
for piece in pieces:
if len(a) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
SCREAMING_SNAKE_CASE = self.sp_model.EncodeAsPieces(piece[:-1].replace(a , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
SCREAMING_SNAKE_CASE = cur_pieces[1:]
else:
SCREAMING_SNAKE_CASE = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(a)
else:
new_pieces.append(a)
return new_pieces
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
return self.sp_model.PieceToId(a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> str:
return self.sp_model.IdToPiece(a)
def SCREAMING_SNAKE_CASE__ ( self , a) -> Optional[Any]:
SCREAMING_SNAKE_CASE = []
SCREAMING_SNAKE_CASE = ''
SCREAMING_SNAKE_CASE = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(a) + token
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = []
else:
current_sub_tokens.append(a)
SCREAMING_SNAKE_CASE = False
out_string += self.sp_model.decode(a)
return out_string.strip()
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = False) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a)
if token_ids_a is not None:
return [1] + ([0] * len(a)) + [1] + ([0] * len(a)) + [1]
return [1] + ([0] * len(a)) + [1]
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> List[int]:
SCREAMING_SNAKE_CASE = [self.sep_token_id]
SCREAMING_SNAKE_CASE = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def SCREAMING_SNAKE_CASE__ ( self , a , a = None) -> Tuple[str]:
if not os.path.isdir(a):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
return
SCREAMING_SNAKE_CASE = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(a) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , a)
elif not os.path.isfile(self.vocab_file):
with open(a , 'wb') as fi:
SCREAMING_SNAKE_CASE = self.sp_model.serialized_model_proto()
fi.write(a)
return (out_vocab_file,)
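# --- Illustrative sketch, not part of the original file ---
# What `preprocess_text` does to a raw string with the default settings
# (remove_space=True, keep_accents=False, do_lower_case=True); the helper
# name is hypothetical.
def _albert_style_normalize(text: str) -> str:
    text = " ".join(text.strip().split())  # collapse whitespace
    text = text.replace("``", '"').replace("''", '"')  # normalize quotes
    text = unicodedata.normalize("NFKD", text)  # decompose accents...
    text = "".join(c for c in text if not unicodedata.combining(c))  # ...and drop them
    return text.lower()
assert _albert_style_normalize("  Héllo ``world''  ") == 'hello "world"'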
| 327 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
a_ : Union[str, Any] = logging.get_logger(__name__)
class _snake_case ( A__ ):
_lowercase : List[str] = ['''pixel_values''']
def __init__( self , a = True , a = 1 / 255 , a = True , a = 8 , **a , ) -> None:
super().__init__(**a)
SCREAMING_SNAKE_CASE = do_rescale
SCREAMING_SNAKE_CASE = rescale_factor
SCREAMING_SNAKE_CASE = do_pad
SCREAMING_SNAKE_CASE = pad_size
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = None , **a) -> np.ndarray:
return rescale(a , scale=a , data_format=a , **a)
def SCREAMING_SNAKE_CASE__ ( self , a , a , a = None) -> List[str]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = get_image_size(a)
SCREAMING_SNAKE_CASE = (old_height // size + 1) * size - old_height
SCREAMING_SNAKE_CASE = (old_width // size + 1) * size - old_width
return pad(a , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=a)
def SCREAMING_SNAKE_CASE__ ( self , a , a = None , a = None , a = None , a = None , a = None , a = ChannelDimension.FIRST , **a , ) -> List[str]:
SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE = do_pad if do_pad is not None else self.do_pad
SCREAMING_SNAKE_CASE = pad_size if pad_size is not None else self.pad_size
SCREAMING_SNAKE_CASE = make_list_of_images(a)
if not valid_images(a):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE = [to_numpy_array(a) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE = [self.rescale(image=a , scale=a) for image in images]
if do_pad:
SCREAMING_SNAKE_CASE = [self.pad(a , size=a) for image in images]
SCREAMING_SNAKE_CASE = [to_channel_dimension_format(a , a) for image in images]
SCREAMING_SNAKE_CASE = {'pixel_values': images}
return BatchFeature(data=a , tensor_type=a)
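# --- Illustrative example, not part of the original file ---
# The symmetric padding above grows each side up to the next multiple of
# `size`; note that an already-aligned edge still gains one full extra block:
def _pad_amounts(height: int, width: int, size: int = 8) -> tuple:
    return ((height // size + 1) * size - height, (width // size + 1) * size - width)
assert _pad_amounts(17, 13) == (7, 3)   # 17x13 -> 24x16
assert _pad_amounts(16, 16) == (8, 8)   # already divisible, still padded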
| 327 | 1 |
"""simple docstring"""
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))
    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError('''The input array is not a square matrix''')
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0
    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))
    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1
        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0
    return updated_arr
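# --- Illustrative example, not part of the original file ---
# maxpooling a 4x4 matrix with size=2, stride=2 keeps the max of each 2x2 tile:
#   [[ 1  2  3  4]
#    [ 5  6  7  8]       [[ 6.  8.]
#    [ 9 10 11 12]   ->   [14. 16.]]
#    [13 14 15 16]]
_demo = np.arange(1, 17).reshape(4, 4)
assert (maxpooling(_demo, size=2, stride=2) == np.array([[6, 8], [14, 16]])).all()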
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='''avgpooling''', verbose=True)
# Loading the image
    image = Image.open('''path_to_image''')
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
| 33 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
c = 299792458
# Symbols
ct, x, y, z = symbols("""ct x y z""")
def beta(velocity: float) -> float:
    """Return beta = velocity / c for a velocity given in m/s."""
    if velocity > c:
        raise ValueError('Speed must not exceed light speed 299,792,458 [m/s]!')
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError('Speed must be greater than or equal to 1!')
    return velocity / c
def gamma(velocity: float) -> float:
    """Return the Lorentz factor gamma = 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)
def transformation_matrix(velocity: float) -> np.ndarray:
    """Return the 4x4 Lorentz boost matrix along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )
def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply a Lorentz boost to a four-vector event (ct, x, y, z)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event
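# --- Illustrative check, not part of the original file ---
# At v = 0.8c: beta = 0.8 and gamma = 1 / sqrt(1 - 0.8**2) = 1 / 0.6 = 5/3.
assert abs(gamma(0.8 * c) - 5 / 3) < 1e-9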
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
    four_vector = transform(29979245)
print("""Example of four vector: """)
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
| 307 | 0 |
def heaps(arr: list) -> list:
    """Return all permutations of arr, generated by Heap's iterative algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []
    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[i], arr[0] = arr[0], arr[i]
                else:
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1
    generate(len(arr), arr)
    return res
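# --- Illustrative example, not part of the original file ---
# heaps([1, 2, 3]) yields all 3! = 6 permutations, in Heap's order:
# [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
assert len(heaps([1, 2, 3])) == 6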
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    arr = [int(item) for item in user_input.split(''',''')]
print(heaps(arr))
| 369 |
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
UpperCamelCase_ = logging.getLogger(__name__)
UpperCamelCase_ = '''pytorch_model.bin'''
@dataclasses.dataclass
class _snake_case :
'''simple docstring'''
A__ : str = dataclasses.field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} )
A__ : Optional[str] = dataclasses.field(
default=__snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."} , )
@dataclasses.dataclass
class _snake_case :
'''simple docstring'''
A__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."} )
A__ : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."} )
A__ : Optional[str] = dataclasses.field(
default=__snake_case , metadata={"help": "A csv or a json file containing the validation data."} )
A__ : Optional[str] = dataclasses.field(
default=__snake_case , metadata={"help": "The name of the task to train on."} , )
A__ : Optional[List[str]] = dataclasses.field(
default=__snake_case , metadata={"help": "The list of labels for the task."} )
@dataclasses.dataclass
class _snake_case :
'''simple docstring'''
A__ : str = dataclasses.field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."} )
A__ : Optional[str] = dataclasses.field(
default="accuracy" , metadata={"help": "The evaluation metric used for the task."} )
A__ : Optional[str] = dataclasses.field(
        default="no" , metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"steps\", \"epoch\"]"
        } , )
A__ : Optional[int] = dataclasses.field(
default=10 , metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."} , )
A__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={
"help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
} , )
A__ : Optional[bool] = dataclasses.field(
default=__snake_case , metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."} , )
A__ : Optional[bool] = dataclasses.field(
default=__snake_case , metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."} , )
A__ : Optional[bool] = dataclasses.field(
default=__snake_case , metadata={"help": "Whether to fine-tune on labeled data after pseudo training."} , )
A__ : Optional[float] = dataclasses.field(
default=0.0 , metadata={"help": "Confidence threshold for pseudo-labeled data filtering."} , )
A__ : Optional[int] = dataclasses.field(
        default=100 , metadata={"help": "Maximum number of self-training iterations to run."} , )
A__ : Optional[int] = dataclasses.field(
default=__snake_case , metadata={"help": "Random seed for initialization."} , )
def lowerCamelCase_ ( _a : str , _a : List[Any] , _a : List[Any] , _a : Dict , _a : int , _a : Tuple ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
UpperCAmelCase_ : List[str] = dataset.filter(lambda _a : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
UpperCAmelCase_ : List[str] = int(eval_result * len(_a ) )
print(_a )
UpperCAmelCase_ : int = dataset.sort("""probability""" , reverse=_a )
UpperCAmelCase_ : Optional[int] = dataset.select(range(_a ) )
UpperCAmelCase_ : List[str] = dataset.remove_columns(["""label""", """probability"""] )
UpperCAmelCase_ : Optional[Any] = dataset.rename_column("""prediction""" , """label""" )
UpperCAmelCase_ : Union[str, Any] = dataset.map(lambda _a : {"label": idalabel[example["label"]]} )
UpperCAmelCase_ : int = dataset.shuffle(seed=args.seed )
UpperCAmelCase_ : int = os.path.join(_a , F'''train_pseudo.{args.data_file_extension}''' )
if args.data_file_extension == "csv":
dataset.to_csv(_a , index=_a )
else:
dataset.to_json(_a )
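# --- Illustrative sketch, not part of the original file ---
# The core of the confidence filter above, applied to a plain list of
# prediction records; the helper name is hypothetical.
def _filter_by_confidence(records, threshold=0.9):
    # keep only pseudo-labels the model is sufficiently sure about
    return [record for record in records if record["probability"] > threshold]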
def lowerCamelCase_ ( _a : Any , _a : int , _a : Dict , _a : List[Any] , **_a : int ):
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
UpperCAmelCase_ : Tuple = STModelArguments(model_name_or_path=_a )
UpperCAmelCase_ : str = STDataArguments(train_file=_a , infer_file=_a )
UpperCAmelCase_ : Optional[Any] = STTrainingArguments(output_dir=_a )
UpperCAmelCase_ : Optional[Any] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(_a ).items():
setattr(_a , _a , _a )
for key, value in kwargs.items():
if hasattr(_a , _a ):
setattr(_a , _a , _a )
# Sanity checks
UpperCAmelCase_ : List[str] = {}
UpperCAmelCase_ : Any = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
UpperCAmelCase_ : List[Any] = args.train_file
UpperCAmelCase_ : Tuple = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
UpperCAmelCase_ : Dict = args.eval_file
for key in data_files:
UpperCAmelCase_ : List[str] = data_files[key].split(""".""" )[-1]
assert extension in ["csv", "json"], F'''`{key}_file` should be a csv or a json file.'''
if args.data_file_extension is None:
UpperCAmelCase_ : int = extension
else:
assert extension == args.data_file_extension, F'''`{key}_file` should be a {args.data_file_extension} file`.'''
assert (
args.eval_metric in datasets.list_metrics()
), F'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info("""Creating the initial data directory for self-training...""" )
UpperCAmelCase_ : int = F'''{args.output_dir}/self-train_iter-{{}}'''.format
UpperCAmelCase_ : List[Any] = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=_a )
os.makedirs(_a , exist_ok=_a )
accelerator.wait_for_everyone()
UpperCAmelCase_ : Any = None
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : List[Any] = False
# Show the progress bar
UpperCAmelCase_ : List[str] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
UpperCAmelCase_ : Any = data_dir_format(_a )
assert os.path.exists(_a )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
UpperCAmelCase_ : List[str] = os.path.join(_a , """stage-1""" )
UpperCAmelCase_ : Optional[int] = {
"""accelerator""": accelerator,
"""model_name_or_path""": args.model_name_or_path,
"""cache_dir""": args.cache_dir,
"""do_train""": True,
"""train_file""": data_files["""train"""] if iteration == 0 else data_files["""train_pseudo"""],
"""do_eval""": True if args.eval_file is not None else False,
"""eval_file""": data_files["""eval"""],
"""do_predict""": True,
"""infer_file""": data_files["""infer"""],
"""task_name""": args.task_name,
"""label_list""": args.label_list,
"""output_dir""": current_output_dir,
"""eval_metric""": args.eval_metric,
"""evaluation_strategy""": args.evaluation_strategy,
"""early_stopping_patience""": args.early_stopping_patience,
"""early_stopping_threshold""": args.early_stopping_threshold,
"""seed""": args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(_a , _a ):
arguments_dict.update({key: value} )
UpperCAmelCase_ : Any = os.path.join(_a , """best-checkpoint""" , _a )
if os.path.exists(_a ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.""" , _a , _a , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 1 *****""" , _a )
finetune(**_a )
accelerator.wait_for_everyone()
assert os.path.exists(_a )
logger.info("""Self-training job completed: iteration: %d, stage: 1.""" , _a )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
UpperCAmelCase_ : Dict = os.path.join(_a , """best-checkpoint""" )
UpperCAmelCase_ : str = os.path.join(_a , """stage-2""" )
# Update arguments_dict
UpperCAmelCase_ : Union[str, Any] = model_path
UpperCAmelCase_ : Dict = data_files["""train"""]
UpperCAmelCase_ : List[str] = current_output_dir
UpperCAmelCase_ : str = os.path.join(_a , """best-checkpoint""" , _a )
if os.path.exists(_a ):
logger.info(
"""Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.""" , _a , _a , )
else:
logger.info("""***** Running self-training: iteration: %d, stage: 2 *****""" , _a )
finetune(**_a )
accelerator.wait_for_everyone()
assert os.path.exists(_a )
logger.info("""Self-training job completed: iteration: %d, stage: 2.""" , _a )
UpperCAmelCase_ : Optional[Any] = iteration
UpperCAmelCase_ : List[str] = data_dir_format(iteration + 1 )
UpperCAmelCase_ : List[str] = AutoConfig.from_pretrained(os.path.join(_a , """best-checkpoint""" ) )
UpperCAmelCase_ : str = config.idalabel
UpperCAmelCase_ : Union[str, Any] = os.path.join(_a , """eval_results_best-checkpoint.json""" )
UpperCAmelCase_ : int = os.path.join(_a , """test_results_best-checkpoint.json""" )
assert os.path.exists(_a )
with open(_a , """r""" ) as f:
UpperCAmelCase_ : Optional[int] = float(json.load(_a )[args.eval_metric] )
UpperCAmelCase_ : Dict = os.path.join(_a , """infer_output_best-checkpoint.csv""" )
assert os.path.exists(_a )
# Loading the dataset from local csv or json files.
UpperCAmelCase_ : Optional[Any] = load_dataset(args.data_file_extension , data_files={"""data""": data_files["""infer"""]} )["""data"""]
UpperCAmelCase_ : List[str] = load_dataset("""csv""" , data_files={"""data""": infer_output_file} )["""data"""]
if accelerator.is_main_process:
os.makedirs(_a , exist_ok=_a )
shutil.copy(_a , os.path.join(_a , F'''eval_results_iter-{iteration}.json''' ) )
if os.path.exists(_a ):
shutil.copy(_a , os.path.join(_a , F'''test_results_iter-{iteration}.json''' ) )
create_pseudo_labeled_data(_a , _a , _a , _a , _a , _a )
accelerator.wait_for_everyone()
UpperCAmelCase_ : Tuple = os.path.join(_a , F'''train_pseudo.{args.data_file_extension}''' )
if args.evaluation_strategy != IntervalStrategy.NO.value:
UpperCAmelCase_ : Optional[Any] = eval_result
if best_iteration is None:
UpperCAmelCase_ : Optional[int] = new_iteration
UpperCAmelCase_ : Union[str, Any] = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
UpperCAmelCase_ : List[str] = new_iteration
UpperCAmelCase_ : Union[str, Any] = new_eval_result
UpperCAmelCase_ : int = 0
else:
if new_eval_result == best_eval_result:
UpperCAmelCase_ : Dict = new_iteration
UpperCAmelCase_ : Optional[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
UpperCAmelCase_ : List[Any] = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info("""Best iteration: %d""" , _a )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , _a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_a , F'''eval_results_iter-{iteration}.json''' ) , os.path.join(_a , """eval_results_best-iteration.json""" ) , )
else:
# Assume that the last iteration is the best
logger.info("""Best iteration: %d""" , args.max_selftrain_iterations - 1 )
logger.info("""Best evaluation result: %s = %f""" , args.eval_metric , _a )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(_a , F'''eval_results_iter-{args.max_selftrain_iterations - 1}.json''' ) , os.path.join(_a , """eval_results_best-iteration.json""" ) , )
| 59 | 0 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
lowercase__ :int = logging.get_logger(__name__)
lowercase__ :Union[str, Any] = "Hello world! cécé herlolip"
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
'''simple docstring'''
lowercase = FairseqRobertaModel.from_pretrained(lowerCAmelCase__ )
roberta.eval() # disable dropout
lowercase = roberta.model.encoder.sentence_encoder
lowercase = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , )
if classification_head:
lowercase = roberta.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
print('''Our RoBERTa config:''' , lowerCAmelCase__ )
lowercase = XLMRobertaXLForSequenceClassification(lowerCAmelCase__ ) if classification_head else XLMRobertaXLForMaskedLM(lowerCAmelCase__ )
model.eval()
# Now let's copy all the weights.
# Embeddings
lowercase = roberta_sent_encoder.embed_tokens.weight
lowercase = roberta_sent_encoder.embed_positions.weight
lowercase = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
lowercase = roberta_sent_encoder.layer_norm.weight
lowercase = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
lowercase = model.roberta.encoder.layer[i]
lowercase = roberta_sent_encoder.layers[i]
lowercase = layer.attention
lowercase = roberta_layer.self_attn_layer_norm.weight
lowercase = roberta_layer.self_attn_layer_norm.bias
# self attention
lowercase = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
lowercase = roberta_layer.self_attn.q_proj.weight
lowercase = roberta_layer.self_attn.q_proj.bias
lowercase = roberta_layer.self_attn.k_proj.weight
lowercase = roberta_layer.self_attn.k_proj.bias
lowercase = roberta_layer.self_attn.v_proj.weight
lowercase = roberta_layer.self_attn.v_proj.bias
# self-attention output
lowercase = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
lowercase = roberta_layer.self_attn.out_proj.weight
lowercase = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
lowercase = roberta_layer.final_layer_norm.weight
lowercase = roberta_layer.final_layer_norm.bias
# intermediate
lowercase = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        lowercase = roberta_layer.fc1.weight
        lowercase = roberta_layer.fc1.bias
        # output
        lowercase = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        lowercase = roberta_layer.fc2.weight
        lowercase = roberta_layer.fc2.bias
# end of layer
if classification_head:
lowercase = roberta.model.classification_heads['''mnli'''].dense.weight
lowercase = roberta.model.classification_heads['''mnli'''].dense.bias
lowercase = roberta.model.classification_heads['''mnli'''].out_proj.weight
lowercase = roberta.model.classification_heads['''mnli'''].out_proj.bias
else:
# LM Head
lowercase = roberta.model.encoder.lm_head.dense.weight
lowercase = roberta.model.encoder.lm_head.dense.bias
lowercase = roberta.model.encoder.lm_head.layer_norm.weight
lowercase = roberta.model.encoder.lm_head.layer_norm.bias
lowercase = roberta.model.encoder.lm_head.weight
lowercase = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
lowercase = roberta.encode(lowerCAmelCase__ ).unsqueeze(0 ) # batch of size 1
lowercase = model(lowerCAmelCase__ )[0]
if classification_head:
lowercase = roberta.model.classification_heads['''mnli'''](roberta.extract_features(lowerCAmelCase__ ) )
else:
lowercase = roberta.model(lowerCAmelCase__ )[0]
print(our_output.shape , their_output.shape )
lowercase = torch.max(torch.abs(our_output - their_output ) ).item()
print(f'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
lowercase = torch.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 )
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
if not success:
raise Exception('''Something went wRoNg''' )
pathlib.Path(lowerCAmelCase__ ).mkdir(parents=lowerCAmelCase__ , exist_ok=lowerCAmelCase__ )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase__ )
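# --- Illustrative sketch, not part of the original file ---
# The closeness check used above, in isolation: two tensors "match" when their
# largest elementwise gap stays within tolerance. Helper name is hypothetical.
def _tensors_close(a, b, atol=1e-3):
    return torch.max(torch.abs(a - b)).item() < atol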
if __name__ == "__main__":
lowercase__ :Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
lowercase__ :Optional[int] = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 101 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
lowercase__ :Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase ( SCREAMING_SNAKE_CASE__ ):
def __init__( self ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,A__ ,):
super().__init__()
self.register_modules(
vae=A__ ,text_encoder=A__ ,tokenizer=A__ ,unet=A__ ,scheduler=A__ ,safety_checker=A__ ,feature_extractor=A__ ,)
def A__ ( self ,A__ = "auto"):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowercase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A__)
def A__ ( self):
self.enable_attention_slicing(A__)
@torch.no_grad()
def __call__( self ,A__ ,A__ = 5_1_2 ,A__ = 5_1_2 ,A__ = 5_0 ,A__ = 7.5 ,A__ = None ,A__ = 1 ,A__ = 0.0 ,A__ = None ,A__ = None ,A__ = "pil" ,A__ = True ,A__ = None ,A__ = 1 ,A__ = None ,**A__ ,):
if isinstance(A__ ,A__):
lowercase = 1
elif isinstance(A__ ,A__):
lowercase = len(A__)
else:
raise ValueError(f'`prompt` has to be of type `str` or `list` but is {type(A__)}')
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'`height` and `width` have to be divisible by 8 but are {height} and {width}.')
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A__ ,A__) or callback_steps <= 0)
):
raise ValueError(
f'`callback_steps` has to be a positive integer but is {callback_steps} of type'
f' {type(A__)}.')
# get prompt text embeddings
lowercase = self.tokenizer(
A__ ,padding='''max_length''' ,max_length=self.tokenizer.model_max_length ,return_tensors='''pt''' ,)
lowercase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f' {self.tokenizer.model_max_length} tokens: {removed_text}')
lowercase = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
lowercase = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
lowercase , lowercase , lowercase = text_embeddings.shape
lowercase = text_embeddings.repeat(1 ,A__ ,1)
lowercase = text_embeddings.view(bs_embed * num_images_per_prompt ,A__ ,-1)
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
lowercase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowercase = 42
if negative_prompt is None:
lowercase = ['''''']
elif type(A__) is not type(A__):
raise TypeError(
f'`negative_prompt` should be the same type to `prompt`, but got {type(A__)} !='
f' {type(A__)}.')
elif isinstance(A__ ,A__):
lowercase = [negative_prompt]
elif batch_size != len(A__):
raise ValueError(
f'`negative_prompt`: {negative_prompt} has batch size {len(A__)}, but `prompt`:'
f' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'
''' the batch size of `prompt`.''')
else:
lowercase = negative_prompt
lowercase = text_input_ids.shape[-1]
lowercase = self.tokenizer(
A__ ,padding='''max_length''' ,max_length=A__ ,truncation=A__ ,return_tensors='''pt''' ,)
lowercase = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase = uncond_embeddings.shape[1]
lowercase = uncond_embeddings.repeat(A__ ,A__ ,1)
lowercase = uncond_embeddings.view(batch_size * num_images_per_prompt ,A__ ,-1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
lowercase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 6_4, 6_4)
lowercase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
lowercase = torch.randn(
A__ ,generator=A__ ,device='''cpu''' ,dtype=A__).to(self.device)
lowercase = torch.randn(A__ ,generator=A__ ,device='''cpu''' ,dtype=A__).to(
self.device)
else:
lowercase = torch.randn(
A__ ,generator=A__ ,device=self.device ,dtype=A__)
lowercase = torch.randn(A__ ,generator=A__ ,device=self.device ,dtype=A__)
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {latents_shape}')
lowercase = latents_reference.to(self.device)
lowercase = latents.to(self.device)
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
lowercase = (latents_shape[3] - latents_shape_reference[3]) // 2
lowercase = (latents_shape[2] - latents_shape_reference[2]) // 2
lowercase = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
lowercase = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
lowercase = 0 if dx < 0 else dx
lowercase = 0 if dy < 0 else dy
lowercase = max(-dx ,0)
lowercase = max(-dy ,0)
# import pdb
# pdb.set_trace()
lowercase = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A__)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
lowercase = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
lowercase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowercase = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
lowercase = {}
if accepts_eta:
lowercase = eta
for i, t in enumerate(self.progress_bar(A__)):
# expand the latents if we are doing classifier free guidance
lowercase = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
lowercase = self.scheduler.scale_model_input(A__ ,A__)
# predict the noise residual
lowercase = self.unet(A__ ,A__ ,encoder_hidden_states=A__).sample
# perform guidance
if do_classifier_free_guidance:
lowercase , lowercase = noise_pred.chunk(2)
lowercase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
lowercase = self.scheduler.step(A__ ,A__ ,A__ ,**A__).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A__ ,A__ ,A__)
lowercase = 1 / 0.18215 * latents
lowercase = self.vae.decode(A__).sample
lowercase = (image / 2 + 0.5).clamp(0 ,1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
lowercase = image.cpu().permute(0 ,2 ,3 ,1).float().numpy()
if self.safety_checker is not None:
lowercase = self.feature_extractor(self.numpy_to_pil(A__) ,return_tensors='''pt''').to(
self.device)
lowercase , lowercase = self.safety_checker(
images=A__ ,clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype))
else:
lowercase = None
if output_type == "pil":
lowercase = self.numpy_to_pil(A__)
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A__ ,nsfw_content_detected=A__)
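# --- Illustrative sketch, not part of the original file ---
# The classifier-free guidance step used in the denoising loop above, in
# isolation: eps = eps_uncond + s * (eps_text - eps_uncond).
def _cfg_combine(noise_pred_uncond, noise_pred_text, guidance_scale):
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)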
| 101 | 1 |
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is an integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(limit: int = 50_00) -> int:
    """Smallest difference of two pentagonal numbers whose sum and difference
    are both pentagonal (Project Euler 44)."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
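# --- Illustrative check, not part of the original file ---
# P(n) = n * (3n - 1) / 2, so P(4) = 22; is_pentagonal inverts the formula.
assert is_pentagonal(22) and not is_pentagonal(23)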
if __name__ == "__main__":
print(F"""{solution() = }""")
| 204 | from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class MobileViTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 256, "width": 256}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def flip_channel_order(
        self, image: np.ndarray, data_format: Optional[Union[str, ChannelDimension]] = None
    ) -> np.ndarray:
        return flip_channel_order(image, data_format=data_format)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_flip_channel_order: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_flip_channel_order = (
            do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
        )

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        # the pretrained checkpoints assume images are BGR, not RGB
        if do_flip_channel_order:
            images = [self.flip_channel_order(image=image) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
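
A minimal sketch of what the per-image branch of post_process_semantic_segmentation does, assuming torch is installed (the tensor shapes are invented for illustration): upsample one image's logits to the requested size, then argmax over the class dimension.

import torch

dummy_logits = torch.randn(2, 4, 8, 8)  # (batch, num_labels, height, width), made up
resized = torch.nn.functional.interpolate(
    dummy_logits[0].unsqueeze(dim=0), size=(32, 32), mode="bilinear", align_corners=False
)
segmentation_map = resized[0].argmax(dim=0)  # (32, 32) map of per-pixel label ids
print(segmentation_map.shape)  # torch.Size([32, 32])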
| 204 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
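
The point of the `_import_structure` indirection above is that none of the heavy submodules is imported until an attribute is first touched. A toy sketch of the same idea using PEP 562's module-level `__getattr__` (the file name and attribute map here are invented for illustration, not the real `_LazyModule`):

# lazy_demo.py, a minimal stand-in for the lazy-import pattern
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}  # attribute -> providing module


def __getattr__(name):
    # Called only when `name` is not already in the module namespace (PEP 562).
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# elsewhere: `import lazy_demo; lazy_demo.sqrt(4.0)` triggers the math import.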
| 279 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
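
A short usage sketch of the metrics above on invented toy data (requires scikit-learn and scipy; the numbers are for illustration only):

import numpy as np

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
print(glue_compute_metrics("mrpc", preds, labels))
# {'acc': 0.75, 'f1': 0.8, 'acc_and_f1': 0.775}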
| 166 | 0 |
'''simple docstring'''
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """Vertex of a weighted, undirected graph."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None
        self.pi = None
        self.neighbors = []
        self.edges = {}  # {vertex:distance}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors (vertices are addressed with 1-based labels):
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a plain list as the priority queue."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm backed by a binary heap."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass
if __name__ == "__main__":
import doctest
doctest.testmod()
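
A small usage sketch of the two Prim variants above (the triangle graph is invented for illustration): edges 1-2 (weight 1), 2-3 (weight 2) and 1-3 (weight 3), so both variants should pick the two cheapest edges.

graph = [Vertex(n) for n in range(3)]
connect(graph, 1, 2, 1)
connect(graph, 2, 3, 2)
connect(graph, 1, 3, 3)
print(prim(graph, graph[0]))               # [(2, 1), (3, 2)]
print(sorted(prim_heap(graph, graph[0])))  # [(2, 1), (3, 2)]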
| 358 |
'''simple docstring'''
import numpy as np
from transformers import Pipeline
def softmax(outputs):
    # Subtracting the row-wise max keeps exp() from overflowing on large logits.
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        logits = model_outputs.logits[0].numpy()
        probabilities = softmax(logits)

        best_class = np.argmax(probabilities)
        label = self.model.config.id2label[best_class]
        score = probabilities[best_class].item()
        logits = logits.tolist()
        return {"label": label, "score": score, "logits": logits}
| 46 | 0 |
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i<length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark as used so duplicates are not re-found

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain


if __name__ == "__main__":
    print(
        "Input profits, weights, and then max_weight (all positive ints) separated by "
        "spaces."
    )

    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))

    # Function Call
    calc_profit(profit, weight, max_weight)
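
A worked example for the greedy above, on the classic instance (values are for illustration): items (60, 10), (100, 20), (120, 30) with capacity 50. The first two items are taken whole and 20/30 of the third, giving 60 + 100 + 80 = 240.

print(calc_profit([60, 100, 120], [10, 20, 30], 50))  # 240.0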
| 327 |
import re
import string
import numpy as np
import datasets
_SCREAMING_SNAKE_CASE = """
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
"""
_SCREAMING_SNAKE_CASE = """
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
25.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
50.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results[\"exact_match\"], 1))
75.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"the cat\", \"theater\", \"YELLING\", \"agent007\"]
>>> preds = [\"cat?\", \"theater\", \"yelling\", \"agent\"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=[\"the \", \"yell\", \"YELL\"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results[\"exact_match\"], 1))
100.0
>>> exact_match = datasets.load_metric(\"exact_match\")
>>> refs = [\"The cat sat on the mat.\", \"Theaters are great.\", \"It's like comparing oranges and apples.\"]
>>> preds = [\"The cat sat on the mat?\", \"Theaters are great.\", \"It's like comparing apples and oranges.\"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results[\"exact_match\"], 1))
33.3
"""
_SCREAMING_SNAKE_CASE = """
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class ExactMatch(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            reference_urls=[],
        )

    def _compute(
        self,
        predictions,
        references,
        regexes_to_ignore=None,
        ignore_case=False,
        ignore_punctuation=False,
        ignore_numbers=False,
    ):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
| 327 | 1 |
'''simple docstring'''
def solution() -> int:
"""simple docstring"""
return [
a * b * (1_0_0_0 - a - b)
for a in range(1 , 9_9_9 )
        for b in range(a , 9_9_9 )
if (a * a + b * b == (1_0_0_0 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(f'{solution() = }')
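
The scan above is quadratic; eliminating c from a + b + c = 1000 and a^2 + b^2 = c^2 gives b = (1_000_000 - 2_000a) / (2_000 - 2a), so a single loop over a suffices. A sketch of that variant (the function name is invented):

def solution_algebraic() -> int:
    """One-loop variant: solve for b once a is fixed."""
    for a in range(1, 1_000):
        b, remainder = divmod(1_000_000 - 2_000 * a, 2_000 - 2 * a)
        if remainder == 0 and a < b:
            return a * b * (1_000 - a - b)
    raise ValueError("no solution found")


if __name__ == "__main__":
    print(f"{solution_algebraic() = }")  # 31875000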
| 164 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 164 | 1 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = '''\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric",
author = "Moosavi, Nafise Sadat and
Strube, Michael",
booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
month = aug,
year = "2016",
address = "Berlin, Germany",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/P16-1060",
doi = "10.18653/v1/P16-1060",
pages = "632--642",
}
'''
_DESCRIPTION = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''
_KWARGS_DESCRIPTION = '''
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting \'keep_singletons=False\', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
\'mentions\': mentions
\'muc\': MUC metric [Vilain et al, 1995]
\'bcub\': B-cubed [Bagga and Baldwin, 1998]
\'ceafe\': CEAFe [Luo et al., 2005]
\'lea\': LEA [Moosavi and Strube, 2016]
\'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric(\'coval\')
>>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\',
... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\',
... \'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\',
... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\',
... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\',
... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos


def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores


def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    """CoVal coreference evaluation metric."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
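
For context, the summary "conll_score" above is just the plain average of the MUC, B-cubed and CEAFe F1 values. A standalone sketch of that arithmetic (the recall/precision numbers are invented for illustration):

def f_beta(recall: float, precision: float, beta: float = 1.0) -> float:
    if recall + precision == 0:
        return 0.0
    return (1 + beta**2) * recall * precision / (beta**2 * precision + recall)


muc_f1 = f_beta(0.80, 0.70)
bcub_f1 = f_beta(0.65, 0.60)
ceafe_f1 = f_beta(0.55, 0.50)
print((muc_f1 + bcub_f1 + ceafe_f1) / 3 * 100)  # the averaged CoNLL score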
| 39 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class UpperCAmelCase :
def __init__(self : Optional[Any] , snake_case__ : Optional[Any]=None , **snake_case__ : Optional[Any] ) -> List[str]:
'''simple docstring'''
logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future." )
snake_case : Optional[Any] = model
snake_case : Dict = kwargs.get("model_save_dir" , snake_case__ )
snake_case : int = kwargs.get("latest_model_name" , snake_case__ )
def __call__(self : Tuple , **snake_case__ : str ) -> List[str]:
'''simple docstring'''
snake_case : Union[str, Any] = {k: np.array(snake_case__ ) for k, v in kwargs.items()}
return self.model.run(snake_case__ , snake_case__ )
@staticmethod
def _SCREAMING_SNAKE_CASE (snake_case__ : Union[str, Path] , snake_case__ : Optional[int]=None , snake_case__ : Optional[int]=None ) -> Any:
'''simple docstring'''
if provider is None:
logger.info("No onnxruntime provider specified, using CPUExecutionProvider" )
snake_case : Optional[int] = "CPUExecutionProvider"
return ort.InferenceSession(snake_case__ , providers=[provider] , sess_options=snake_case__ )
def _SCREAMING_SNAKE_CASE (self : List[Any] , snake_case__ : Union[str, Path] , snake_case__ : Optional[str] = None , **snake_case__ : Any ) -> List[Any]:
'''simple docstring'''
snake_case : Tuple = file_name if file_name is not None else ONNX_WEIGHTS_NAME
snake_case : Any = self.model_save_dir.joinpath(self.latest_model_name )
snake_case : str = Path(snake_case__ ).joinpath(snake_case__ )
try:
shutil.copyfile(snake_case__ , snake_case__ )
except shutil.SameFileError:
pass
# copy external weights (for models >2GB)
snake_case : List[str] = self.model_save_dir.joinpath(snake_case__ )
if src_path.exists():
snake_case : Tuple = Path(snake_case__ ).joinpath(snake_case__ )
try:
shutil.copyfile(snake_case__ , snake_case__ )
except shutil.SameFileError:
pass
def _SCREAMING_SNAKE_CASE (self : str , snake_case__ : Union[str, os.PathLike] , **snake_case__ : Optional[int] , ) -> str:
'''simple docstring'''
if os.path.isfile(snake_case__ ):
logger.error(f"""Provided path ({save_directory}) should be a directory, not a file""" )
return
os.makedirs(snake_case__ , exist_ok=snake_case__ )
# saving model weights/files
self._save_pretrained(snake_case__ , **snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Tuple , snake_case__ : Union[str, Path] , snake_case__ : Optional[Union[bool, str, None]] = None , snake_case__ : Optional[Union[str, None]] = None , snake_case__ : bool = False , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , snake_case__ : Optional["ort.SessionOptions"] = None , **snake_case__ : Tuple , ) -> Tuple:
'''simple docstring'''
snake_case : List[str] = file_name if file_name is not None else ONNX_WEIGHTS_NAME
# load model from local directory
if os.path.isdir(snake_case__ ):
snake_case : Any = OnnxRuntimeModel.load_model(
os.path.join(snake_case__ , snake_case__ ) , provider=snake_case__ , sess_options=snake_case__ )
snake_case : Union[str, Any] = Path(snake_case__ )
# load model from hub
else:
# download model
snake_case : Dict = hf_hub_download(
repo_id=snake_case__ , filename=snake_case__ , use_auth_token=snake_case__ , revision=snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , )
snake_case : List[Any] = Path(snake_case__ ).parent
snake_case : Union[str, Any] = Path(snake_case__ ).name
snake_case : Dict = OnnxRuntimeModel.load_model(snake_case__ , provider=snake_case__ , sess_options=snake_case__ )
return cls(model=snake_case__ , **snake_case__ )
@classmethod
def _SCREAMING_SNAKE_CASE (cls : Optional[Any] , snake_case__ : Union[str, Path] , snake_case__ : bool = True , snake_case__ : Optional[str] = None , snake_case__ : Optional[str] = None , **snake_case__ : Dict , ) -> Union[str, Any]:
'''simple docstring'''
snake_case : Dict = None
if len(str(snake_case__ ).split("@" ) ) == 2:
snake_case , snake_case : int = model_id.split("@" )
return cls._from_pretrained(
model_id=snake_case__ , revision=snake_case__ , cache_dir=snake_case__ , force_download=snake_case__ , use_auth_token=snake_case__ , **snake_case__ , )
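
A small sketch of what the dtype map at the top of this file is for (the input shape is invented): ONNX Runtime reports input types as strings like "tensor(float16)", and feeds must be numpy arrays of the matching dtype.

import numpy as np

dtype = ORT_TO_NP_TYPE["tensor(float16)"]
batch = np.zeros((1, 3, 224, 224), dtype=dtype)  # a made-up image batch
print(batch.dtype)  # float16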
| 59 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
"post_extract_proj": "feature_projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.upsample.0": "encoder.upsample.projection",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "layer_norm",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
for attribute in key.split('.' ):
A__ = getattr(UpperCamelCase__ , UpperCamelCase__ )
if weight_type is not None:
A__ = getattr(UpperCamelCase__ , UpperCamelCase__ ).shape
else:
A__ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A__ = value
elif weight_type == "weight_g":
A__ = value
elif weight_type == "weight_v":
A__ = value
elif weight_type == "bias":
A__ = value
else:
A__ = value
logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = []
A__ = fairseq_model.state_dict()
A__ = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
A__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , hf_model.config.feat_extract_norm == 'group' , )
A__ = True
else:
for key, mapped_key in MAPPING.items():
A__ = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
A__ = True
if "*" in mapped_key:
A__ = name.split(UpperCamelCase__ )[0].split('.' )[-2]
A__ = mapped_key.replace('*' , UpperCamelCase__ )
if "weight_g" in name:
A__ = 'weight_g'
elif "weight_v" in name:
A__ = 'weight_v'
elif "weight" in name:
A__ = 'weight'
elif "bias" in name:
A__ = 'bias'
else:
A__ = None
set_recursively(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = full_name.split('conv_layers.' )[-1]
A__ = name.split('.' )
A__ = int(items[0] )
A__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
A__ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
A__ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
A__ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
A__ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(UpperCamelCase__ )
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
A__ = SEWConfig()
if is_finetuned:
A__ = model.wav_encoder.wav_model.cfg
else:
A__ = model.cfg
A__ = fs_config.conv_bias
A__ = eval(fs_config.conv_feature_layers )
A__ = [x[0] for x in conv_layers]
A__ = [x[1] for x in conv_layers]
A__ = [x[2] for x in conv_layers]
A__ = 'gelu'
A__ = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
A__ = 0.0
A__ = fs_config.activation_fn.name
A__ = fs_config.encoder_embed_dim
A__ = 0.0_2
A__ = fs_config.encoder_ffn_embed_dim
A__ = 1E-5
A__ = fs_config.encoder_layerdrop
A__ = fs_config.encoder_attention_heads
A__ = fs_config.conv_pos_groups
A__ = fs_config.conv_pos
A__ = len(UpperCamelCase__ )
A__ = fs_config.encoder_layers
A__ = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
A__ = model.cfg
A__ = fs_config.final_dropout
A__ = fs_config.layerdrop
A__ = fs_config.activation_dropout
A__ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
A__ = fs_config.attention_dropout
A__ = fs_config.dropout_input
A__ = fs_config.dropout
A__ = fs_config.mask_channel_length
A__ = fs_config.mask_channel_prob
A__ = fs_config.mask_length
A__ = fs_config.mask_prob
A__ = 'Wav2Vec2FeatureExtractor'
A__ = 'Wav2Vec2CTCTokenizer'
return config
@torch.no_grad()
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=True ):
"""simple docstring"""
if is_finetuned:
A__ , A__ , A__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
A__ , A__ , A__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
A__ = SEWConfig.from_pretrained(UpperCamelCase__ )
else:
A__ = convert_config(model[0] , UpperCamelCase__ )
A__ = model[0].eval()
A__ = True if config.feat_extract_norm == 'layer' else False
A__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , )
if is_finetuned:
if dict_path:
A__ = Dictionary.load(UpperCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A__ = target_dict.pad_index
A__ = target_dict.bos_index
A__ = target_dict.pad_index
A__ = target_dict.bos_index
A__ = target_dict.eos_index
A__ = len(target_dict.symbols )
A__ = os.path.join(UpperCamelCase__ , 'vocab.json' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(UpperCamelCase__ ) )
return
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , UpperCamelCase__ )
A__ = WavaVecaCTCTokenizer(
UpperCamelCase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=UpperCamelCase__ , )
A__ = WavaVecaProcessor(feature_extractor=UpperCamelCase__ , tokenizer=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
A__ = SEWForCTC(UpperCamelCase__ )
else:
A__ = SEWModel(UpperCamelCase__ )
feature_extractor.save_pretrained(UpperCamelCase__ )
recursively_load_weights(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
hf_model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
__lowerCamelCase = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
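
A standalone illustration of the wildcard remapping that the weight-loading loop above performs (the names here are invented): the layer index is read out of the fairseq key and substituted for the "*" in the mapped key.

fairseq_name = "encoder.layers.7.self_attn.k_proj.weight"
mapped_key = "encoder.layers.*.attention.k_proj"

layer_index = fairseq_name.split("encoder.layers.")[-1].split(".")[0]
print(mapped_key.replace("*", layer_index))  # encoder.layers.7.attention.k_proj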
| 154 | """simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
__lowerCamelCase = imread(R"digital_image_processing/image_data/lena_small.jpg")
__lowerCamelCase = cvtColor(img, COLOR_BGR2GRAY)
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = cn.convert_to_negative(UpperCamelCase__ )
# assert negative_img array for at least one True
assert negative_img.any()
def UpperCAmelCase ( ):
"""simple docstring"""
with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img:
# Work around assertion for response
assert str(cc.change_contrast(UpperCamelCase__ , 110 ) ).startswith(
'<PIL.Image.Image image mode=RGB size=100x100 at' )
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = imread('digital_image_processing/image_data/lena_small.jpg' , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
A__ = canny.canny(UpperCamelCase__ )
# assert canny array for at least one True
assert canny_array.any()
def UpperCAmelCase ( ):
"""simple docstring"""
assert gg.gaussian_filter(UpperCamelCase__ , 5 , sigma=0.9 ).all()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = array([[0.2_5, 0.5, 0.2_5], [0.5, -3, 0.5], [0.2_5, 0.5, 0.2_5]] )
A__ = conv.img_convolve(UpperCamelCase__ , UpperCamelCase__ ).astype(UpperCamelCase__ )
assert res.any()
def UpperCAmelCase ( ):
"""simple docstring"""
assert med.median_filter(UpperCamelCase__ , 3 ).any()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ , A__ = sob.sobel_filter(UpperCamelCase__ )
assert grad.any() and theta.any()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = sp.make_sepia(UpperCamelCase__ , 20 )
assert sepia.all()
def UpperCAmelCase ( UpperCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" ):
"""simple docstring"""
A__ = bs.Burkes(imread(UpperCamelCase__ , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def UpperCAmelCase ( UpperCamelCase__ = "digital_image_processing/image_data/lena_small.jpg" , ):
"""simple docstring"""
A__ = rs.NearestNeighbour(imread(UpperCamelCase__ , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def UpperCAmelCase ( ):
"""simple docstring"""
A__ = 'digital_image_processing/image_data/lena.jpg'
# Reading the image and converting it to grayscale.
A__ = imread(UpperCamelCase__ , 0 )
# Test for get_neighbors_pixel function() return not None
A__ = 0
A__ = 0
A__ = image[x_coordinate][y_coordinate]
A__ = lbp.get_neighbors_pixel(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
A__ = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
A__ = lbp.local_binary_value(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
assert lbp_image.any()
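
For context on what the LBP test above exercises, a self-contained sketch of one common local-binary-pattern convention (the 3x3 patch is invented; bit order and weighting vary between implementations): the 8 neighbours are thresholded against the centre pixel and the bits are packed into one value.

import numpy as np

patch = np.array([[6, 5, 2], [7, 6, 1], [9, 8, 7]])
center = patch[1, 1]
neighbors = [patch[0, 0], patch[0, 1], patch[0, 2], patch[1, 2],
             patch[2, 2], patch[2, 1], patch[2, 0], patch[1, 0]]
bits = [int(p >= center) for p in neighbors]
lbp_value = sum(bit << i for i, bit in enumerate(bits))
print(bits, lbp_value)  # [1, 0, 0, 0, 1, 1, 1, 1] 241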
| 154 | 1 |
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class A( unittest.TestCase ):
'''simple docstring'''
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
['en-ru', 26.0],
['ru-en', 22.0],
['en-de', 22.0],
['de-en', 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
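A sketch of how the imported calculate_bleu helper could be built on sacrebleu (assumed installed; this is not necessarily the project's actual implementation): corpus_bleu takes one list of hypotheses and a list of reference streams.

import sacrebleu

hyps = ["the cat sat on the mat"]
refs = ["the cat sat on the mat"]
print(round(sacrebleu.corpus_bleu(hyps, [refs]).score, 2))  # 100.0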
| 204 |
test_graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph, s, t, parent):
    # Return True if there is an augmenting path from s to t in the residual graph.
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def mincut(graph, source, sink):
    parent = [-1] * (len(graph))  # This array is filled by bfs to store the path
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
print(mincut(test_graph, source=0, sink=5))
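
A quick sanity check, as a self-contained sketch on a fresh copy of the same network: the saturated edges found by mincut, measured on the untouched capacity matrix, should sum to the max flow of this classic CLRS example (23).

import copy

capacities = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
cut = mincut(copy.deepcopy(capacities), source=0, sink=5)
print(cut)                                    # [(1, 3), (4, 3), (4, 5)]
print(sum(capacities[u][v] for u, v in cut))  # 23, the max-flow value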
| 204 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''',
# See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig):
    model_type = "vit_mae"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        decoder_num_attention_heads=16,
        decoder_hidden_size=512,
        decoder_num_hidden_layers=8,
        decoder_intermediate_size=2048,
        mask_ratio=0.75,
        norm_pix_loss=False,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
| 262 |
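# Usage sketch for the config above, assuming it mirrors transformers'
# ViTMAEConfig (same fields and the same defaults as in the __init__ above):
from transformers import ViTMAEConfig

config = ViTMAEConfig(mask_ratio=0.75, norm_pix_loss=True)
print(config.hidden_size, config.decoder_hidden_size)  # 768 512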
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class __A ( unittest.TestCase ):
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
lowerCamelCase =Vector()
def _snake_case ( self ):
lowerCamelCase =Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(UpperCAmelCase_ ) , """(0,0,0,0,0,1)""" )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2, 3, 4] )
self.assertEqual(len(UpperCAmelCase_ ) , 4 )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2] )
lowerCamelCase =Vector([1, 2, 3, 4, 5] )
lowerCamelCase =Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
lowerCamelCase =Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.2_3_6 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.4_1_6 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.6_1_6 , 3 )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2, 3] )
lowerCamelCase =Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2, 3] )
lowerCamelCase =Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2, 3] )
lowerCamelCase =Vector([2, -1, 4] ) # for test of dot product
lowerCamelCase =Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , """(3.0,6.0,9.0)""" )
self.assertEqual((a * b) , 0 )
def _snake_case ( self ):
self.assertEqual(str(zero_vector(10 ) ).count("""0""" ) , 10 )
def _snake_case ( self ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , """(0,1,0)""" )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 2, 3] )
lowerCamelCase =Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , UpperCAmelCase_ , UpperCAmelCase_ ) ) , """(3,4,7)""" )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 0, 0, 0, 0, 0] )
lowerCamelCase =x.copy()
self.assertEqual(str(UpperCAmelCase_ ) , str(UpperCAmelCase_ ) )
def _snake_case ( self ):
lowerCamelCase =Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(UpperCAmelCase_ ) , """(0,1,0)""" )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("""|1,2,3|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase_ ) )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase =[[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(UpperCAmelCase_ , UpperCAmelCase_ ) )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase =[[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(UpperCAmelCase_ , UpperCAmelCase_ ) )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
lowerCamelCase =Vector([1, 2, 3] )
self.assertEqual("""(14,32,50)""" , str(a * x ) )
self.assertEqual("""|2,4,6|\n|8,10,12|\n|14,16,18|\n""" , str(a * 2 ) )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("""|1,2,5|\n|2,4,5|\n|6,7,8|\n""" , str(UpperCAmelCase_ ) )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.0_1 )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|2,4,10|\n|4,8,10|\n|12,14,18|\n""" , str(a + b ) )
def _snake_case ( self ):
lowerCamelCase =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("""|0,0,-4|\n|0,0,0|\n|0,0,-2|\n""" , str(a - b ) )
def _snake_case ( self ):
self.assertEqual(
"""|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n""" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 262 | 1 |
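# The euclidean_length checks above reduce to sqrt(sum of squared components);
# a quick cross-check of the expected constants with the standard library:
import math

assert round(math.sqrt(1**2 + 2**2), 3) == 2.236  # Vector([1, 2])
assert round(math.sqrt(sum(c * c for c in (1, 2, 3, 4, 5))), 3) == 7.416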
"""simple docstring"""
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = "relu"
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config",
        default="facebook/mbart-large-cc25",
        type=str,
        help="Which huggingface architecture to use: mbart-large",
    )
    parser.add_argument("--mbart_50", action="store_true", help="whether the model is an mBART-50 checkpoint")
    parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
| 74 |
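# Hypothetical invocation of the converter above (the script name and all
# paths are placeholders, not values from this repository):
#
#   python convert_mbart_checkpoint.py /path/to/model.pt ./mbart-dump \
#       --hf_config facebook/mbart-large-cc25 --finetuned
#
# The resulting folder then reloads with the standard API:
#
#   from transformers import MBartForConditionalGeneration
#   model = MBartForConditionalGeneration.from_pretrained("./mbart-dump")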
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"

    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext

    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)


class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 46 | 0 |
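# The extension sniffing above is a pure function; a quick check, assuming
# "csv" and the "pipe" fallback are among PipelineDataFormat.SUPPORTED_FORMATS
# (they are in transformers):
assert try_infer_format_from_ext("predictions.csv") == "csv"
assert try_infer_format_from_ext("") == "pipe"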
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A simple differentiable pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        # Map pixel coordinates to [-1, 1], then scale by tan(fov / 2).
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        """Creates a new camera for the resized view, assuming the aspect ratio does not change."""
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
| 369 |
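# Minimal usage sketch of the camera above: twenty poses on a circle, with
# one (origin, direction) pair per pixel of a 64x64 view.
cameras = create_pan_cameras(64)
rays = cameras.camera_rays
print(rays.shape)  # torch.Size([1, 20 * 64 * 64, 2, 3])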
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ , lowerCamelCase_=99 , lowerCamelCase_=13 , lowerCamelCase_=7 , lowerCamelCase_=9 , lowerCamelCase_=True , lowerCamelCase_=True , lowerCamelCase_=False , lowerCamelCase_=32 , lowerCamelCase_=5 , lowerCamelCase_=4 , lowerCamelCase_=37 , lowerCamelCase_=8 , lowerCamelCase_=0.1 , lowerCamelCase_=0.002 , lowerCamelCase_=1 , lowerCamelCase_=0 , lowerCamelCase_=0 , lowerCamelCase_=None , lowerCamelCase_=None , ) -> str:
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = encoder_seq_length
lowerCAmelCase__ = decoder_seq_length
# For common tests
lowerCAmelCase__ = self.decoder_seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_attention_mask
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = hidden_size
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = d_ff
lowerCAmelCase__ = relative_attention_num_buckets
lowerCAmelCase__ = dropout_rate
lowerCAmelCase__ = initializer_factor
lowerCAmelCase__ = eos_token_id
lowerCAmelCase__ = pad_token_id
lowerCAmelCase__ = decoder_start_token_id
lowerCAmelCase__ = None
lowerCAmelCase__ = decoder_layers
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return TaConfig.from_pretrained('''google/umt5-base''' )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , ) -> List[str]:
if attention_mask is None:
lowerCAmelCase__ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
lowerCAmelCase__ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
lowerCAmelCase__ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowerCamelCase_ )
if decoder_head_mask is None:
lowerCAmelCase__ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase_ )
if cross_attn_head_mask is None:
lowerCAmelCase__ = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowerCamelCase_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowerCAmelCase__ = input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ = decoder_input_ids.clamp(self.pad_token_id + 1 )
lowerCAmelCase__ = self.get_config()
lowerCAmelCase__ = config.num_attention_heads
lowerCAmelCase__ = self.prepare_inputs_dict(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return config, input_dict
def __SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
lowerCAmelCase__ , lowerCAmelCase__ = self.prepare_config_and_inputs()
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> Dict:
lowerCAmelCase__ = UMTaModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCAmelCase__ = model(
input_ids=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ , attention_mask=lowerCamelCase_ , decoder_attention_mask=lowerCamelCase_ , )
lowerCAmelCase__ = model(input_ids=lowerCamelCase_ , decoder_input_ids=lowerCamelCase_ )
lowerCAmelCase__ = result.last_hidden_state
lowerCAmelCase__ = result.past_key_values
lowerCAmelCase__ = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowerCamelCase_ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , ) -> int:
lowerCAmelCase__ = UMTaModel(config=lowerCamelCase_ ).get_decoder().to(lowerCamelCase_ ).eval()
# first forward pass
lowerCAmelCase__ = model(lowerCamelCase_ , use_cache=lowerCamelCase_ )
lowerCAmelCase__ = model(lowerCamelCase_ )
lowerCAmelCase__ = model(lowerCamelCase_ , use_cache=lowerCamelCase_ )
self.parent.assertTrue(len(lowerCamelCase_ ) == len(lowerCamelCase_ ) )
self.parent.assertTrue(len(lowerCamelCase_ ) == len(lowerCamelCase_ ) + 1 )
lowerCAmelCase__ , lowerCAmelCase__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCAmelCase__ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
lowerCAmelCase__ = torch.cat([input_ids, next_tokens] , dim=-1 )
lowerCAmelCase__ = model(lowerCamelCase_ )['''last_hidden_state''']
lowerCAmelCase__ = model(lowerCamelCase_ , past_key_values=lowerCamelCase_ )['''last_hidden_state''']
# select random slice
lowerCAmelCase__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
lowerCAmelCase__ = output_from_no_past[:, -1, random_slice_idx].detach()
lowerCAmelCase__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 ) )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ , ) -> List[str]:
lowerCAmelCase__ = UMTaModel(config=lowerCamelCase_ ).to(lowerCamelCase_ ).half().eval()
lowerCAmelCase__ = model(**lowerCamelCase_ )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(lowerCamelCase_ ).any().item() )
@require_torch
class a__ ( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
lowercase__ : Tuple = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
lowercase__ : List[Any] = (UMTaForConditionalGeneration,) if is_torch_available() else ()
lowercase__ : Dict = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
lowercase__ : Optional[int] = True
lowercase__ : Tuple = False
lowercase__ : Optional[int] = False
lowercase__ : Optional[Any] = True
lowercase__ : Optional[int] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
lowercase__ : int = [0.8, 0.9]
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
lowerCAmelCase__ = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ = UMTaModel(config_and_inputs[0] ).to(lowerCamelCase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowerCamelCase_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , F"""{tmpdirname}/t5_test.onnx""" , export_params=lowerCamelCase_ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def __SCREAMING_SNAKE_CASE ( self ) -> List[str]:
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowerCamelCase_ )
def __SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
lowerCAmelCase__ = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase__ = config_and_inputs[0]
lowerCAmelCase__ = UMTaForConditionalGeneration(lowerCamelCase_ ).eval()
model.to(lowerCamelCase_ )
lowerCAmelCase__ = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=lowerCamelCase_ ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase_ ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowerCamelCase_ ),
}
for attn_name, (name, mask) in zip(lowerCamelCase_ , head_masking.items() ):
lowerCAmelCase__ = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowerCAmelCase__ = torch.ones(
config.num_decoder_layers , config.num_heads , device=lowerCamelCase_ )
lowerCAmelCase__ = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=lowerCamelCase_ , return_dict_in_generate=lowerCamelCase_ , **lowerCamelCase_ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowerCAmelCase__ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def __SCREAMING_SNAKE_CASE ( self ) -> int:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a__ ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=lowerCamelCase_ ).to(lowerCamelCase_ )
lowerCAmelCase__ = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=lowerCamelCase_ , legacy=lowerCamelCase_ )
lowerCAmelCase__ = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
lowerCAmelCase__ = tokenizer(lowerCamelCase_ , return_tensors='''pt''' , padding=lowerCamelCase_ ).input_ids
# fmt: off
lowerCAmelCase__ = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = model.generate(input_ids.to(lowerCamelCase_ ) )
lowerCAmelCase__ = [
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ <extra_id_56>ajลกietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajลกie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> ํผํด[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
lowerCAmelCase__ = tokenizer.batch_decode(lowerCamelCase_ )
self.assertEqual(lowerCamelCase_ , lowerCamelCase_ )
| 228 | 0 |
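# `ids_tensor` (imported from the common test utils in the test file above)
# draws random token ids; a minimal equivalent, not the exact helper:
import torch

def ids_tensor_sketch(shape, vocab_size):
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

batch = ids_tensor_sketch([13, 7], vocab_size=99)  # matches the tester's defaults
assert batch.shape == (13, 7) and int(batch.max()) < 99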
'''simple docstring'''
import argparse
import os
import jax
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
__A = "base_with_context"
def load_notes_encoder(weights, model):
lowercase__ = nn.Parameter(torch.FloatTensor(weights["""token_embedder"""]["""embedding"""] ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=lowercase__ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
lowercase__ = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
        attention_weights = ly_weight["attention"]
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_continuous_encoder(weights, model):
lowercase__ = nn.Parameter(torch.FloatTensor(weights["""input_proj"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=lowercase__ )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
        attention_weights = ly_weight["attention"]
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_attention_layer_norm"""]["""scale"""] ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowercase__ = nn.Parameter(torch.FloatTensor(weights["""encoder_norm"""]["""scale"""] ) )
return model
def load_decoder(weights, model):
lowercase__ = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense0"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(weights["""time_emb_dense1"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(weights["""Embed_0"""]["""embedding"""] ) , requires_grad=lowercase__ )
lowercase__ = nn.Parameter(
torch.FloatTensor(weights["""continuous_inputs_projection"""]["""kernel"""].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f"layers_{lyr_num}"]
lowercase__ = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_self_attention_layer_norm"""]["""scale"""] ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_0"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
lowercase__ = ly_weight["""self_attention"""]
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowercase__ = ly_weight["""MultiHeadDotProductAttention_0"""]
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""query"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""key"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""value"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(attention_weights["""out"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(ly_weight["""pre_cross_attention_layer_norm"""]["""scale"""] ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["""pre_mlp_layer_norm"""]["""scale"""] ) )
lowercase__ = nn.Parameter(
torch.FloatTensor(ly_weight["""FiLMLayer_1"""]["""DenseGeneral_0"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_0"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wi_1"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(ly_weight["""mlp"""]["""wo"""]["""kernel"""].T ) )
lowercase__ = nn.Parameter(torch.FloatTensor(weights["""decoder_norm"""]["""scale"""] ) )
lowercase__ = nn.Parameter(torch.FloatTensor(weights["""spec_out_dense"""]["""kernel"""].T ) )
return model
def main(args):
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jax.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
        "from __gin__ import dynamic_registration",
        "from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
        "diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
        "diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
    ]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")
    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu", )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate, )
    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)
    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan, )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
    args = parser.parse_args()
main(args)
| 164 |
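# The many `.T` transposes in the converter above come from a layout
# difference: t5x/Flax dense kernels are stored as (in_features, out_features),
# while torch.nn.Linear keeps (out_features, in_features). In miniature:
import numpy as np
import torch

kernel = np.zeros((3, 5), dtype=np.float32)      # Flax layout: (in, out)
linear = torch.nn.Linear(3, 5, bias=False)       # torch layout: (5, 3)
linear.weight.data = torch.from_numpy(kernel.T)  # hence kernel.T everywhere above
assert tuple(linear.weight.shape) == (5, 3)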
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
torch.backends.cuda.matmul.allow_tf32 = False


class VersatileDiffusionImageVariationPipelineFastTests(unittest.TestCase):
    pass


@slow
@require_torch_gpu
class VersatileDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def test_inference_image_variations(self):
        pipe = VersatileDiffusionImageVariationPipeline.from_pretrained("shi-labs/versatile-diffusion")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        image_prompt = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe(
            image=image_prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy", ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.04_41, 0.04_69, 0.05_07, 0.05_75, 0.06_32, 0.06_50, 0.08_65, 0.09_09, 0.09_45])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 164 | 1 |
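# The assertion pattern in the test above pins a 3x3 corner of the generated
# image against reference values; the same check on a stand-in array:
import numpy as np

image = np.zeros((1, 512, 512, 3), dtype=np.float32)  # stand-in for pipe output
image_slice = image[0, 253:256, 253:256, -1]
expected = np.zeros(9, dtype=np.float32)
assert np.abs(image_slice.flatten() - expected).max() < 1e-2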
"""simple docstring"""
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_a : Tuple = logging.get_logger(__name__)
def rename_key(key):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex, key)
    for pat in pats:
        key = key.replace(pat, "_".join(pat.split(".")))
    return key


def rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key)
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor

    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor

    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2, 3, 1, 0)
        return renamed_pt_tuple_key, pt_tensor

    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor

    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def convert_pytorch_state_dict_to_flax(pt_state_dict, flax_model, init_key=42):
    # Step 1: Convert pytorch tensors to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}

    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key))

    random_flax_state_dict = flatten_dict(random_flax_params)
    flax_state_dict = {}

    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key)
        pt_tuple_key = tuple(renamed_pt_key.split("."))

        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key, pt_tensor, random_flax_state_dict)

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    f"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )

        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor)

    return unflatten_dict(flax_state_dict)
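# rename_key above flattens indexed module names into Flax-style keys, e.g.:
assert rename_key("down_blocks.0.resnets.1.conv1.weight") == "down_blocks_0.resnets_1.conv1.weight"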
| 370 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Any = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : str = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Union[str, Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Tuple = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[int] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Optional[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[Any] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : Dict = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : List[str] = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
class __A ( metaclass=SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = ["sentencepiece"]
def __init__( self , *a__ , **a__ ):
requires_backends(self , ["""sentencepiece"""] )
| 126 | 0 |
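# The classes in the file above are import-time placeholders: with a
# DummyObject metaclass (imported at the top of that file), instantiation
# raises a helpful error whenever the sentencepiece backend is missing.
# A sketch of the idea, not the real transformers implementation:
def requires_backends_sketch(obj, backends):
    raise ImportError(f"{type(obj).__name__} requires the backends: {backends}")

class DummyTokenizerSketch:
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends_sketch(self, self._backends)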
import operator
def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """Strand sort: repeatedly peel an ordered sublist off `arr` and merge it
    into `solution`. Note that `arr` is consumed in the process."""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 154 |
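# strand_sort above consumes its input via arr.pop; pass a copy if the
# original list is still needed:
data = [4, 3, 5, 1, 2]
assert strand_sort(data.copy()) == [1, 2, 3, 4, 5]
assert data == [4, 3, 5, 1, 2]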
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
| 154 | 1 |
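# model_input_names above merges the two name lists while de-duplicating and
# preserving order; the dict.fromkeys trick in isolation:
tokenizer_names = ["input_ids", "token_type_ids", "attention_mask"]
image_names = ["pixel_values", "attention_mask"]
assert list(dict.fromkeys(tokenizer_names + image_names)) == [
    "input_ids", "token_type_ids", "attention_mask", "pixel_values"
]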
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Flip two qubits with X (NOT) gates and measure them."""
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)
    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])
    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(f"Total count for various states are: {counts}")
| 363 |
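# Sanity check for the circuit above (needs qiskit with the Aer simulator
# installed): X flips both qubits deterministically, so all 1000 shots
# collapse to "11".
assert single_qubit_measure(2, 2) == {"11": 1000}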
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 141 | 0 |
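# The pattern above defers heavy imports until first attribute access. The
# core idea in miniature (a sketch, not transformers' actual _LazyModule):
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")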