"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
A__ = hf_hub_download(
repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
A__ = VideoClassificationPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ , top_k=2 )
A__ = [
example_video_filepath,
'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
]
return video_classifier, examples
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> str:
for example in examples:
A__ = video_classifier(SCREAMING_SNAKE_CASE_ )
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
{"score": ANY(SCREAMING_SNAKE_CASE_ ), "label": ANY(SCREAMING_SNAKE_CASE_ )},
{"score": ANY(SCREAMING_SNAKE_CASE_ ), "label": ANY(SCREAMING_SNAKE_CASE_ )},
] , )
@require_torch
def snake_case__ ( self ) -> List[Any]:
A__ = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
A__ = VideoMAEFeatureExtractor(
size={"shortest_edge": 10} , crop_size={"height": 10, "width": 10} )
A__ = pipeline(
"video-classification" , model=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ , frame_sampling_rate=4 )
A__ = hf_hub_download(repo_id="nateraw/video-demo" , filename="archery.mp4" , repo_type="dataset" )
A__ = video_classifier(SCREAMING_SNAKE_CASE_ , top_k=2 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}] , )
A__ = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}],
[{"score": 0.5_1_9_9, "label": "LABEL_0"}, {"score": 0.4_8_0_1, "label": "LABEL_1"}],
] , )
@require_tf
def snake_case__ ( self ) -> str:
pass
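# Illustrative usage sketch, not part of the original test file: how the
# "video-classification" task exercised above is called directly. The tiny
# checkpoint name is the one the test uses; the local video path is a
# hypothetical placeholder.
#
# from transformers import pipeline
#
# classifier = pipeline(
#     "video-classification",
#     model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
# )
# predictions = classifier("archery.mp4", top_k=2)
# # predictions is a list of {"score": float, "label": str} dicts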
# Project Euler problem 191: Prize Strings
# https://projecteuler.net/problem=191

cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """
    Count the possible prize strings for the given number of remaining days,
    the number of absences used so far, and the current late streak.
    """
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings

    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of possible prize strings for a period of `days` days."""
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
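# Sanity check added for illustration: the Project Euler 191 statement notes
# that there are exactly 43 prize strings over a 4-day period, which this
# implementation reproduces.
if __name__ == "__main__":
    assert solution(4) == 43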
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # UnCLIP does not rescale model inputs
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        # The step ratio ensures the last train timestep is always included,
        # mimicking the original unCLIP implementation.
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
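# Minimal usage sketch, added for illustration (not from the original file):
# the denoising loop a pipeline would drive with this scheduler. `unet` stands
# in for any model mapping (noisy sample, timestep) -> predicted noise, so this
# is a shape-level sketch rather than a runnable pipeline.
#
# scheduler = UnCLIPScheduler()
# scheduler.set_timesteps(25)
# sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
# for t in scheduler.timesteps:
#     noise_pred = unet(sample, t)
#     sample = scheduler.step(noise_pred, t, sample).prev_sample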
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union


_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    """Dataset version in MAJOR.MINOR.PATCH format."""

    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    """Return the (major, minor, patch) tuple extracted from the version string."""
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    """Convert a version tuple back to a string."""
    return ".".join(str(v) for v in version_tuple)
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mt5"] = [
        "MT5EncoderModel",
        "MT5ForConditionalGeneration",
        "MT5ForQuestionAnswering",
        "MT5Model",
        "MT5PreTrainedModel",
        "MT5Stack",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]


if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model

else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        extra_objects={"MT5Tokenizer": MT5Tokenizer, "MT5TokenizerFast": MT5TokenizerFast},
        module_spec=__spec__,
    )
import sys
from collections import defaultdict


class Heap:
    """Min-heap that also tracks each vertex's position inside the heap."""

    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, tempa

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if the value of any node in the min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Return the edges of a minimum spanning tree using Prim's algorithm."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
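# Small worked example, added for illustration: building the adjacency list
# programmatically instead of from stdin. For this weighted graph the minimum
# spanning tree found by prisms_algorithm is (0, 1), (1, 2), (2, 3); call
# _demo() to check.
def _demo() -> None:
    demo_graph = defaultdict(list)
    for u, v, w in [(0, 1, 1), (0, 2, 3), (1, 2, 2), (1, 3, 5), (2, 3, 4)]:
        demo_graph[u].append([v, w])
        demo_graph[v].append([u, w])
    assert prisms_algorithm(demo_graph) == [(0, 1), (1, 2), (2, 3)]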
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DetaImageProcessor


class DetaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when providing images to DetaImageProcessor,
        assuming do_resize is set to True with a scalar size.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DetaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)

        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values

        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)

        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DetaImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DetaImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
# Project Euler problem 6: https://projecteuler.net/problem=6
# Find the difference between the sum of the squares of the first n natural
# numbers and the square of their sum.


def solution(n: int = 100) -> int:
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares)


if __name__ == "__main__":
    print(f"{solution() = }")
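# Quick check added for illustration: for the first ten natural numbers the
# square of the sum is 3025, the sum of the squares is 385, and the difference
# is 2640, matching the Project Euler 6 statement.
if __name__ == "__main__":
    assert solution(10) == 2640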
def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Naive recursive solution to the 0/1 knapsack problem.

    :param weights: list of item weights
    :param values: list of profits corresponding to the weights
    :param number_of_items: number of items available to pick from
    :param max_weight: maximum weight that can be carried
    :param index: the item currently being considered
    :return: maximum attainable value

    >>> knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0)
    13
    >>> knapsack([3, 4, 5], [10, 9, 8], 3, 25, 0)
    27
    """
    if index == number_of_items:
        return 0
    # Option 1: skip the current item
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    # Option 2: take the current item, if it fits
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
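# Small worked example added for illustration: the classic instance with
# weights (10, 20, 30), values (60, 100, 120) and capacity 50, whose optimum
# of 220 takes the items of weight 20 and 30.
if __name__ == "__main__":
    demo_weights = [10, 20, 30]
    demo_values = [60, 100, 120]
    assert knapsack(demo_weights, demo_values, len(demo_weights), 50, 0) == 220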
import inspect
import unittest

import numpy as np

from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax

    from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel


class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
"""Convert MobileViT checkpoints from the ml-cvnets library."""
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileViTConfig,
    MobileViTForImageClassification,
    MobileViTForSemanticSegmentation,
    MobileViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")

    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name


def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value weights
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the model's weights to our MobileViT structure.
    """
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(checkpoint, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
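# Example invocation, added for illustration (flag names match the argparse
# definition above; the script name and checkpoint filename are placeholders):
#
#   python convert_mobilevit_checkpoint.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small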
import argparse


CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the stable version and the version table in the doc's custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
# Project Euler problem 145: https://projecteuler.net/problem=145
# Count reversible numbers: n + reverse(n) consists entirely of odd digits.

EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count the reversible numbers of the given length by filling digit pairs
    from the outside in, tracking the carry (`remainder`) parity."""
    if remaining_length == 0:
        # Leading or trailing zero means the number (or its reverse) is invalid
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]

            if remainder % 2 == 0:
                return 0

            remainder //= 10

        return 1

    if remaining_length == 1:
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(
                0, (remainder + 2 * digit) // 10, digits, length
            )
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1

        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Count all reversible numbers with up to `max_power` digits."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = (
            self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        )

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
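
# Added smoke test (illustrative; shapes and channel counts are arbitrary).
# Flax modules are initialized lazily: build parameters with `init`, then run
# the forward pass with `apply`.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    hidden_states = jnp.ones((1, 8, 8, 32))  # NHWC layout, as nn.Conv expects
    temb = jnp.ones((1, 128))  # time embedding
    variables = block.init(rng, hidden_states, temb)
    out = block.apply(variables, hidden_states, temb)
    print(out.shape)  # (1, 8, 8, 64)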
| 13 | 0 |
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32(bytestream):
    dt = numpy.dtype(numpy.uint32).newbyteorder(">")
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]


@deprecated(None, "Please use tf.data to implement this functionality.")
def _extract_images(f):
    """Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
    print("Extracting", f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError(
                "Invalid magic number %d in MNIST image file: %s" % (magic, f.name))
        num_images = _read32(bytestream)
        rows = _read32(bytestream)
        cols = _read32(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(UpperCAmelCase_ , """Please use tf.one_hot on tensors.""" )
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Optional[Any]:
lowercase_ = labels_dense.shape[0]
lowercase_ = numpy.arange(UpperCAmelCase_ ) * num_classes
lowercase_ = numpy.zeros((num_labels, num_classes) )
lowercase_ = 1
return labels_one_hot
@deprecated(UpperCAmelCase_ , """Please use tf.data to implement this functionality.""" )
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=10 ) ->str:
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=UpperCAmelCase_ ) as bytestream:
lowercase_ = _readaa(UpperCAmelCase_ )
if magic != 20_49:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
lowercase_ = _readaa(UpperCAmelCase_ )
lowercase_ = bytestream.read(UpperCAmelCase_ )
lowercase_ = numpy.frombuffer(UpperCAmelCase_ , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(UpperCAmelCase_ , UpperCAmelCase_ )
return labels
class _DataSet:
    @deprecated(
        None,
        "Please use alternatives such as official/mnist/_DataSet.py"
        " from tensorflow/models.",
    )
    def __init__(
        self,
        images,
        labels,
        fake_data=False,
        one_hot=False,
        dtype=dtypes.float32,
        reshape=True,
        seed=None,
    ):
        """Construct a _DataSet."""
        seed1, seed2 = random_seed.get_seed(seed)
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2)
        dtype = dtypes.as_dtype(dtype).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError("Invalid image dtype %r, expected uint8 or float32" % dtype)
        if fake_data:
            self._num_examples = 10_000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f"images.shape: {images.shape} labels.shape: {labels.shape}"
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0], images.shape[1] * images.shape[2])
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32)
                images = numpy.multiply(images, 1.0 / 255.0)
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0
    @property
    def images(self):
        return self._images

    @property
    def labels(self):
        return self._labels

    @property
    def num_examples(self):
        return self._num_examples

    @property
    def epochs_completed(self):
        return self._epochs_completed
    def next_batch(self, batch_size, fake_data=False, shuffle=True):
        """Return the next `batch_size` examples from this data set."""
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size)],
                [fake_label for _ in range(batch_size)],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples)
            numpy.random.shuffle(perm0)
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples)
                numpy.random.shuffle(perm)
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part), axis=0),
                numpy.concatenate((labels_rest_part, labels_new_part), axis=0),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(UpperCAmelCase_ , """Please write your own downloading logic.""" )
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Tuple:
if not gfile.Exists(UpperCAmelCase_ ):
gfile.MakeDirs(UpperCAmelCase_ )
lowercase_ = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
if not gfile.Exists(UpperCAmelCase_ ):
urllib.request.urlretrieve(UpperCAmelCase_ , UpperCAmelCase_ ) # noqa: S310
with gfile.GFile(UpperCAmelCase_ ) as f:
lowercase_ = f.size()
print("""Successfully downloaded""" , UpperCAmelCase_ , UpperCAmelCase_ , """bytes.""" )
return filepath
@deprecated(None, "Please use alternatives such as:" " tensorflow_datasets.load('mnist')")
def read_data_sets(
    train_dir,
    fake_data=False,
    one_hot=False,
    dtype=dtypes.float32,
    reshape=True,
    validation_size=5_000,
    seed=None,
    source_url=DEFAULT_SOURCE_URL,
):
    if fake_data:

        def fake():
            return _DataSet(
                [], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed)

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train, validation=validation, test=test)

    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL

    train_images_file = "train-images-idx3-ubyte.gz"
    train_labels_file = "train-labels-idx1-ubyte.gz"
    test_images_file = "t10k-images-idx3-ubyte.gz"
    test_labels_file = "t10k-labels-idx1-ubyte.gz"

    local_file = _maybe_download(
        train_images_file, train_dir, source_url + train_images_file)
    with gfile.Open(local_file, "rb") as f:
        train_images = _extract_images(f)

    local_file = _maybe_download(
        train_labels_file, train_dir, source_url + train_labels_file)
    with gfile.Open(local_file, "rb") as f:
        train_labels = _extract_labels(f, one_hot=one_hot)

    local_file = _maybe_download(
        test_images_file, train_dir, source_url + test_images_file)
    with gfile.Open(local_file, "rb") as f:
        test_images = _extract_images(f)

    local_file = _maybe_download(
        test_labels_file, train_dir, source_url + test_labels_file)
    with gfile.Open(local_file, "rb") as f:
        test_labels = _extract_labels(f, one_hot=one_hot)

    if not 0 <= validation_size <= len(train_images):
        msg = (
            "Validation size should be between 0 and "
            f"{len(train_images)}. Received: {validation_size}."
        )
        raise ValueError(msg)

    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    options = {"dtype": dtype, "reshape": reshape, "seed": seed}

    train = _DataSet(train_images, train_labels, **options)
    validation = _DataSet(validation_images, validation_labels, **options)
    test = _DataSet(test_images, test_labels, **options)

    return _Datasets(train=train, validation=validation, test=test)
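
# Added smoke test (illustrative): fake_data avoids any network access, so the
# deprecated loader and next_batch can be exercised offline.
if __name__ == "__main__":
    datasets = read_data_sets("/tmp/mnist", fake_data=True, one_hot=True)
    images, labels = datasets.train.next_batch(32, fake_data=True)
    print(len(images), len(labels))  # 32 32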
| 451 |
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """In-place least-significant-digit radix sort for non-negative integers."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each bucket's contents back into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
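
if __name__ == "__main__":
    # Added usage example: radix_sort sorts in place and returns the list.
    print(radix_sort([170, 45, 75, 90, 802, 24, 2, 66]))  # [2, 24, 45, 66, 75, 90, 170, 802]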
| 13 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
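
# Usage sketch (added; mirrors how other modules in the package consume this
# helper): dep_version_check validates a single optional dependency on demand,
# e.g.
#
#     from .dependency_versions_check import dep_version_check
#     dep_version_check("tokenizers")
#
# which raises if the installed version violates the pin recorded in
# dependency_versions_table.py.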
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    """Count perimeters L <= limit for which exactly one integer-sided right
    triangle exists, generating all triples via Euclid's formula."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(f'''{solution() = }''')
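
# Added sanity check (illustrative, not part of the original solution): a
# brute-force count over integer legs should agree with the Euclid-based
# count above for small limits.
from math import isqrt


def brute_force(limit: int) -> int:
    counts: defaultdict = defaultdict(int)
    for a in range(1, limit):
        for b in range(a, limit):
            c = isqrt(a * a + b * b)
            if c * c == a * a + b * b and a + b + c <= limit:
                counts[a + b + c] += 1
    return sum(1 for v in counts.values() if v == 1)


if __name__ == "__main__":
    assert brute_force(200) == solution(200)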
| 13 | 0 |
"""simple docstring"""
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.weight", f"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm1.bias", f"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.weight", f"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.norm2.bias", f"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.qkv.weight", f"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.weight", f"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((f"visual_encoder.blocks.{i}.attn.proj.bias", f"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.weight", f"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc1.bias", f"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.weight", f"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((f"visual_encoder.blocks.{i}.mlp.fc2.bias", f"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blipa_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = BlipaVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = TaConfig.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = BlipaConfig(vision_config=vision_config, text_config=text_config)

    return config, image_size
@torch.no_grad()
def convert_blipa_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blipa_config(model_name, eos_token_id=eos_token_id)

    hf_model = BlipaForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = BlipaProcessor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
    args = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
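# Example invocation (added; the script/file name and paths are illustrative):
#
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b \
#       --pytorch_dump_folder_path /tmp/blip2-opt-2.7b
#
# This downloads the LAVIS checkpoint, remaps its state dict into the
# Transformers layout, sanity-checks logits and a generated caption against the
# original model, and saves the converted processor and weights.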
| 77 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RoFormer tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # custom Python pre-tokenizers cannot be pickled; fall back to Bert
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(
        self,
        save_directory,
        legacy_format=None,
        filename_prefix=None,
        push_to_hub=False,
        **kwargs,
    ):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
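
# Illustrative usage (added; downloading the files requires network access):
#
#     tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#     tokenizer.tokenize("今天天气非常好。")  # jieba-based pre-tokenization
#
# The custom Jieba pre-tokenizer is swapped for a plain BertPreTokenizer before
# pickling or saving, because custom Python pre-tokenizers cannot be serialized.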
| 13 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
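
# Behavior note with a hypothetical usage (added): nothing from this subpackage
# is actually imported until an attribute is first accessed, e.g.
#
#     from transformers import TransfoXLConfig  # resolved lazily via _import_structure
#
# `_LazyModule` replaces this module in sys.modules, so heavy torch/TF backends
# are only loaded when they are both installed and actually requested.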
| 452 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
A__ : Dict = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
| 13 | 0 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class UpperCamelCase__ ( metaclass=_UpperCAmelCase ):
"""simple docstring"""
A__ : Optional[int] = ['note_seq']
def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> str:
requires_backends(self , ["note_seq"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
requires_backends(cls , ["note_seq"] )
@classmethod
def snake_case__ ( cls , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
requires_backends(cls , ["note_seq"] )
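
# Behavior sketch (added): constructing the dummy fails fast with a message
# telling the user to install the missing backend, e.g.
#
#     MidiProcessor()  # raises ImportError: MidiProcessor requires the note_seq library
#
# so importing the parent package keeps working even when the optional
# note_seq dependency is absent.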
| 104 |
import re
from typing import Callable, List, Optional, Union

import tensorflow as tf


try:
    from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
    from tensorflow.keras.optimizers import Adam


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on top of a given learning rate decay schedule."""

    def __init__(
        self,
        initial_learning_rate: float,
        decay_schedule_fn: Callable,
        warmup_steps: int,
        power: float = 1.0,
        name: str = None,
    ):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr,
            decay_schedule_fn=lr_schedule,
            warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied to selected parameters."""

    def __init__(
        self,
        learning_rate: Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001,
        beta_1: float = 0.9,
        beta_2: float = 0.999,
        epsilon: float = 1e-7,
        amsgrad: bool = False,
        weight_decay_rate: float = 0.0,
        include_in_weight_decay: Optional[List[str]] = None,
        exclude_from_weight_decay: Optional[List[str]] = None,
        name: str = "AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to `param_name`."""
        if self.weight_decay_rate == 0:
            return False

        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True

        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Gradient accumulation utility: accumulates gradients locally on each
    replica and without synchronization until explicitly reset."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")

        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)

        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
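
# Minimal usage sketch (added; `model` and `grads` and all hyperparameters
# here are placeholders, not from this file):
#
#     optimizer, lr_schedule = create_optimizer(
#         init_lr=5e-5, num_train_steps=1_000, num_warmup_steps=100,
#         weight_decay_rate=0.01,
#     )
#     accumulator = GradientAccumulator()
#     # inside the train step:
#     accumulator(grads)                      # accumulate micro-batch gradients
#     if accumulator.step % 4 == 0:           # apply every 4 micro-batches
#         optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#         accumulator.reset()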
| 13 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
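
# Example invocation (added; the training script name is illustrative):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# The launcher imports run_glue.py as a module, rewrites sys.argv to append
# --tpu_num_cores, and spawns one process per TPU core via xmp.spawn, each
# entering the script's _mp_fn.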
| 181 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
    def test_model_outputs_equivalence(self ) -> Tuple:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t ):
            # NaN != NaN, so this mask selects exactly the NaN entries
            t[t != t] = 0
            return t

        def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
                dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()

                def recursive_check(tuple_object , dict_object ):
                    if isinstance(tuple_object , (List, Tuple) ):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif isinstance(tuple_object , Dict ):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values() ):
                            recursive_check(tuple_iterable_value , dict_iterable_value )
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1E-5 ) , msg=(
                                'Tuple and dict output are not equal. Difference:'
                                f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
                                f' {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object )}. Dict has'
                                f' `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object )}.'
                            ) , )

                recursive_check(tuple_output , dict_output )
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class )
            check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
            tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
@require_torch
class MaskFormerSwinBackboneTest (unittest.TestCase , BackboneTesterMixin ):
    """simple docstring"""
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig
    def setUp(self ) -> Tuple:
        self.model_tester = MaskFormerSwinModelTester(self )

    # Overriding as returned hidden states are tuples of tensors instead of a single tensor
    def test_backbone_outputs(self ) -> Optional[Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict['pixel_values'].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config )
            backbone.to(torch_device )
            backbone.eval()
            outputs = backbone(**inputs_dict )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , tuple )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict , output_hidden_states=True )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size , h_seq_len , h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict , output_attentions=True )
                self.assertIsNotNone(outputs.attentions )
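# Hedged usage sketch of the backbone contract exercised above; the tiny
# config values are illustrative assumptions, not real checkpoint settings.
if __name__ == "__main__":
    import torch
    from transformers import MaskFormerSwinBackbone, MaskFormerSwinConfig

    tiny_config = MaskFormerSwinConfig(image_size=64 , embed_dim=16 , depths=[1, 1] , num_heads=[2, 2] )
    tiny_backbone = MaskFormerSwinBackbone(tiny_config ).eval()
    with torch.no_grad():
        tiny_maps = tiny_backbone(torch.randn(1 , 3 , 64 , 64 ) ).feature_maps
    for tiny_map, tiny_channels in zip(tiny_maps , tiny_backbone.channels ):
        # one (batch, channels, height, width) feature map per backbone stage
        assert tiny_map.shape[1] == tiny_channels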
| 13 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __lowerCAmelCase ( PretrainedConfig ):
    model_type = 'donut-swin'
    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1e-5 , **kwargs , ) -> List[str]:
        '''simple docstring'''
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
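# Hedged sanity check of the derived channel dimension computed above: with
# the defaults (embed_dim=96, four stages) it is 96 * 2 ** 3 == 768.
if __name__ == "__main__":
    demo_config = __lowerCAmelCase()
    assert demo_config.hidden_size == int(demo_config.embed_dim * 2 ** (len(demo_config.depths ) - 1) ) == 768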
| 291 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg , hint=None ):
    require_version(deps[pkg] , hint )
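# Hedged usage sketch of the helper above: check a single runtime dependency,
# assuming it is installed; the hint is appended to the error on failure.
if __name__ == "__main__":
    dep_version_check("tqdm" , "pip install tqdm --upgrade" )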
| 13 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest (TestCase ):
    '''simple docstring'''
    def setUp(self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
        # Realm tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'test',
'question',
'this',
'is',
'the',
'first',
'second',
'third',
'fourth',
'fifth',
'record',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname , "realm_tokenizer" )
        os.makedirs(realm_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(realm_tokenizer_path , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        realm_block_records_path = os.path.join(self.tmpdirname , "realm_block_records" )
        os.makedirs(realm_block_records_path , exist_ok=True )

    def get_tokenizer(self ) -> RealmTokenizer:
        """simple docstring"""
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )

    def tearDown(self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )

    def get_config(self ):
        """simple docstring"""
        config = RealmConfig(num_block_records=self.num_block_records )
        return config

    def get_dummy_dataset(self ):
        """simple docstring"""
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            } )
        return dataset

    def get_dummy_block_records(self ):
        """simple docstring"""
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ] , dtype=object , )
        return block_records

    def get_dummy_retriever(self ):
        """simple docstring"""
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever
    def test_retrieve(self ):
        """simple docstring"""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3] , dtype="long" )
        question_input_ids = tokenizer(["Test question"] ).input_ids
        answer_ids = tokenizer(
            ["the fourth"] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors="np" )
        self.assertEqual(len(has_answers ) , 2 )
        self.assertEqual(len(start_pos ) , 2 )
        self.assertEqual(len(end_pos ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
    def test_block_has_answer(self ):
        """simple docstring"""
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5] , dtype="long" )
        question_input_ids = tokenizer(["Test question"] ).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , _concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors="np" )
        self.assertEqual([False, True, True] , has_answers )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )
    def test_save_load_pretrained(self ):
        """simple docstring"""
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
        self.assertEqual(retriever.block_records[0] , b"This is the first record" )
        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
            self.assertEqual(retriever.block_records[0] , b"This is the first record" )
| 191 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
os.environ["""TF_CPP_MIN_LOG_LEVEL"""] = """3"""  # reduce tensorflow log level
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 13 | 0 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
SCREAMING_SNAKE_CASE :Optional[int] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class __lowerCAmelCase ( ChunkPipeline ):
"""simple docstring"""
    def __init__( self : Tuple , **kwargs : str ) -> List[str]:
        """simple docstring"""
        super().__init__(**kwargs )
        if self.framework == "tf":
            raise ValueError(F'''The {self.__class__} is only available in PyTorch.''' )
        requires_backends(self , "vision" )
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING )

    def __call__( self : Any , image : Union[str, "Image.Image"] , candidate_labels : Union[str, List[str]] = None , **kwargs : List[str] , ) -> Optional[Any]:
        """simple docstring"""
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries" )
        if isinstance(image , (str, Image.Image) ):
            inputs = {'image': image, 'candidate_labels': candidate_labels}
        else:
            # a list / generator / dataset of inputs is passed through as-is
            inputs = image
        results = super().__call__(inputs , **kwargs )
        return results

    def _sanitize_parameters( self : Optional[int] , **kwargs : Union[str, Any] ) -> List[Any]:
        """simple docstring"""
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params['threshold'] = kwargs['threshold']
        if "top_k" in kwargs:
            postprocess_params['top_k'] = kwargs['top_k']
        return {}, {}, postprocess_params

    def preprocess( self : Tuple , inputs : Dict ) -> List[str]:
        """simple docstring"""
        image = load_image(inputs["image"] )
        candidate_labels = inputs['candidate_labels']
        if isinstance(candidate_labels , str ):
            candidate_labels = candidate_labels.split("," )
        target_size = torch.tensor([[image.height, image.width]] , dtype=torch.int32 )
        for i, candidate_label in enumerate(candidate_labels ):
            text_inputs = self.tokenizer(candidate_label , return_tensors=self.framework )
            image_features = self.image_processor(image , return_tensors=self.framework )
            yield {
                "is_last": i == len(candidate_labels ) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward( self : List[str] , model_inputs : Optional[int] ) -> Any:
        """simple docstring"""
        target_size = model_inputs.pop("target_size" )
        candidate_label = model_inputs.pop("candidate_label" )
        is_last = model_inputs.pop("is_last" )
        outputs = self.model(**model_inputs )
        model_outputs = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
        return model_outputs

    def postprocess( self : List[str] , model_outputs : List[Any] , threshold : float = 0.1 , top_k : Optional[Any] = None ) -> Tuple:
        """simple docstring"""
        results = []
        for model_output in model_outputs:
            label = model_output['candidate_label']
            model_output = BaseModelOutput(model_output )
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output , threshold=threshold , target_sizes=model_output["target_size"] )[0]
            for index in outputs["scores"].nonzero():
                score = outputs['scores'][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0] )
                result = {'score': score, 'label': label, 'box': box}
                results.append(result )
        results = sorted(results , key=lambda x: x["score"] , reverse=True )
        if top_k:
            results = results[:top_k]
        return results

    def _get_bounding_box( self : str , box : "torch.Tensor" ) -> Dict[str, int]:
        """simple docstring"""
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch." )
        xmin , ymin , xmax , ymax = box.int().tolist()
        bbox = {
            'xmin': xmin,
            'ymin': ymin,
            'xmax': xmax,
            'ymax': ymax,
        }
        return bbox
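# Hedged usage sketch (the checkpoint below is an assumption; any OWL-ViT
# style zero-shot object-detection checkpoint works with this pipeline):
#
#   from transformers import pipeline
#
#   detector = pipeline("zero-shot-object-detection" , model="google/owlvit-base-patch32" )
#   detector(
#       "http://images.cocodataset.org/val2017/000000039769.jpg" ,
#       candidate_labels=["cat", "remote control"] ,
#       threshold=0.1 ,
#   )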
| 283 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url ).content ).xpath(xpath_str ) )


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 13 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self , ints: Iterable[int] ):
        """simple docstring"""
        self.head: Node | None = None
        for i in sorted(ints , reverse=True ):
            self.head = Node(i , self.head )
def __iter__(self : List[str] ):
"""simple docstring"""
__snake_case = self.head
while node:
yield node.data
__snake_case = node.next_node
def __len__(self : str ):
"""simple docstring"""
return sum(1 for _ in self )
def __str__(self : str ):
"""simple docstring"""
return " -> ".join([str(SCREAMING_SNAKE_CASE_ ) for node in self] )
def merge_lists(sll_one: SortedLinkedList , sll_two: SortedLinkedList ) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one ) + list(sll_two ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
    SSL = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 592 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """simple docstring"""

    def __init__( self , vertices : set[int] , edges : Mapping[EdgeT, int] ) -> None:
        self.vertices : set[int] = vertices
        self.edges : dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }

    def add_edge( self , edge : EdgeT , weight : int ) -> None:
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight

    def prims_algorithm( self ) -> Graph:
        # Grow a minimum spanning tree from an arbitrary start vertex, always
        # adding the cheapest edge that crosses the current cut.
        subgraph : Graph = Graph({min(self.vertices )} , {} )
        min_edge : EdgeT
        min_weight : int
        edge : EdgeT
        weight : int
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph
def solution(filename : str = "p107_network.txt" ) -> int:
    script_dir : str = os.path.abspath(os.path.dirname(__file__ ) )
    network_file : str = os.path.join(script_dir , filename )
    edges : dict[EdgeT, int] = {}
    with open(network_file ) as f:
        data : list[str] = f.read().strip().split('\n' )
    adjacency_matrix = [line.split(',' ) for line in data]
    for edgea in range(1 , len(adjacency_matrix ) ):
        for edgeb in range(edgea ):
            if adjacency_matrix[edgea][edgeb] != "-":
                edges[(edgea, edgeb)] = int(adjacency_matrix[edgea][edgeb] )
    graph : Graph = Graph(set(range(len(adjacency_matrix ) ) ) , edges )
    subgraph : Graph = graph.prims_algorithm()
    initial_total : int = sum(graph.edges.values() )
    optimal_total : int = sum(subgraph.edges.values() )
    return initial_total - optimal_total
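# Hedged mini-example of the Graph API above: on a weighted triangle the
# minimum spanning tree keeps the two cheapest edges, saving 3 of weight 6.
_demo_graph = Graph({0, 1, 2} , {(0, 1): 1, (1, 2): 2, (0, 2): 3} )
assert sum(_demo_graph.edges.values() ) - sum(_demo_graph.prims_algorithm().edges.values() ) == 3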
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 | 0 |
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def layer_name_mapping(key , file ):
    """Map Megatron-DeepSpeed weight names onto the transformers BLOOM names."""
    layer_rename_map = {
        'word_embeddings.weight': 'word_embeddings.weight',
        'word_embeddings.norm.weight': 'word_embeddings_layernorm.weight',
        'word_embeddings.norm.bias': 'word_embeddings_layernorm.bias',
        'weight': 'ln_f.weight',
        'bias': 'ln_f.bias',
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks: shift the Megatron layer index by the three
    # non-block layers that precede them
    layer_number = int(re.match(R'.*layer_(\d*).*' , file )[1] )
    layer_number -= 3
    return f'''h.{layer_number}.''' + key


def get_dtype_size(dtype ):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R'[^\d](\d+)$' , str(dtype ) )
    if bit_search is None:
        raise ValueError(f'''`dtype` is not a valid dtype: {dtype}.''' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file )
if shard_model:
snake_case__ : Dict = os.listdir(UpperCAmelCase_ )
snake_case__ : Union[str, Any] = sorted(filter(lambda A : s.startswith('layer' ) and "model_00" in s , UpperCAmelCase_ ) )
snake_case__ : Any = {'weight_map': {}, 'metadata': {}}
snake_case__ : int = 0
snake_case__ : int = None
snake_case__ : Dict = BloomConfig()
for j, file in enumerate(UpperCAmelCase_ ):
print('Processing file: {}'.format(UpperCAmelCase_ ) )
snake_case__ : Optional[Any] = None
for i in range(UpperCAmelCase_ ):
# load all TP files
snake_case__ : Optional[int] = file.replace('model_00' , f'''model_0{i}''' )
snake_case__ : Any = torch.load(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , map_location='cpu' )
# Rename keys in the transformers names
snake_case__ : Dict = list(temp.keys() )
for key in keys:
snake_case__ : Optional[Any] = temp.pop(UpperCAmelCase_ )
if tensors is None:
snake_case__ : List[str] = temp
else:
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case__ : Tuple = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
snake_case__ : str = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case__ : List[str] = tensors[key] / pretraining_tp
torch.save(
UpperCAmelCase_ , os.path.join(
UpperCAmelCase_ , 'pytorch_model_{}-of-{}.bin'.format(str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase_ ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
snake_case__ : Tuple = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
snake_case__ : str = 'pytorch_model_{}-of-{}.bin'.format(
str(j + 1 ).zfill(5 ) , str(len(UpperCAmelCase_ ) ).zfill(5 ) )
snake_case__ : List[Any] = BloomConfig()
snake_case__ : List[Any] = pytorch_dump_folder_path + '/' + CONFIG_NAME
snake_case__ : str = total_size
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(UpperCAmelCase_ , WEIGHTS_NAME + '.index.json' ) , 'w' , encoding='utf-8' ) as f:
snake_case__ : Tuple = json.dumps(UpperCAmelCase_ , indent=2 , sort_keys=UpperCAmelCase_ ) + '\n'
f.write(UpperCAmelCase_ )
else:
snake_case__ : str = BloomModel(UpperCAmelCase_ )
snake_case__ : List[Any] = os.listdir(UpperCAmelCase_ )
snake_case__ : Tuple = sorted(filter(lambda A : s.startswith('layer' ) and "model_00" in s , UpperCAmelCase_ ) )
snake_case__ : List[str] = None
for i, file in enumerate(UpperCAmelCase_ ):
snake_case__ : Union[str, Any] = None
for i in range(UpperCAmelCase_ ):
# load all TP files
snake_case__ : Optional[Any] = file.replace('model_00' , f'''model_0{i}''' )
snake_case__ : List[str] = torch.load(os.path.join(UpperCAmelCase_ , UpperCAmelCase_ ) , map_location='cpu' )
# Rename keys in the transformers names
snake_case__ : List[Any] = list(temp.keys() )
for key in keys:
snake_case__ : int = temp.pop(UpperCAmelCase_ )
if tensors is None:
snake_case__ : List[str] = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(UpperCAmelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
snake_case__ : Any = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
snake_case__ : int = torch.cat([tensors[key], temp[key]] , dim=UpperCAmelCase_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(UpperCAmelCase_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
snake_case__ : Union[str, Any] = tensors[key] / pretraining_tp
snake_case__ : int = model.load_state_dict(UpperCAmelCase_ , strict=UpperCAmelCase_ )
assert not other_keys.unexpected_keys, f'''The keys {other_keys.unexpected_keys} are unexpected'''
if missing_keys is None:
snake_case__ : str = set(other_keys.missing_keys )
else:
snake_case__ : int = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'''The keys {missing_keys} are missing'''
# Save pytorch-model
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
snake_case__ : Optional[int] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
snake_case__ : List[str] = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(f'''Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}''' )
if config.torch_dtype is not None:
snake_case__ : Dict = model.to(config.torch_dtype )
torch.save(model.state_dict() , UpperCAmelCase_ )
print(f'''Save configuration file to {pytorch_config_dump_path}''' )
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bloom_checkpoint_path',
default=None,
type=str,
required=True,
help='Path to the Megatron-LM checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--bloom_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--shard_model',
action='store_true',
help='An optional setting to shard the output model \nThis enables sharding the converted checkpoint',
)
parser.add_argument(
'--pretraining_tp',
default=4,
type=int,
help='Pretraining TP rank that has been used when training the model in Megatron-LM \n',
)
lowerCamelCase : List[str] = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
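# Hedged invocation sketch using the arguments defined above (the script file
# name and all paths are placeholders):
#
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /data/megatron_ckpt \
#       --pytorch_dump_folder_path ./bloom-hf \
#       --shard_model \
#       --pretraining_tp 4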
| 170 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa : bytes ) -> bytes:
    if len(string_aa ) != 32:
        raise ValueError('Input must be of length 32' )
    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i : int ) -> bytes:
    if i < 0:
        raise ValueError('Input must be non-negative' )
    hex_rep = format(i , '08x' )[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' )
    return little_endian_hex


def preprocess(message : bytes ) -> bytes:
    bit_string = b''
    for char in message:
        bit_string += format(char , '08b' ).encode('utf-8' )
    start_len = format(len(bit_string ) , '064b' ).encode('utf-8' )
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string ) % 5_12 != 4_48:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:] ) + to_little_endian(start_len[:32] )
    return bit_string


def get_block_words(bit_string : bytes ) -> Generator[list[int], None, None]:
    if len(bit_string ) % 5_12 != 0:
        raise ValueError('Input must have length that\'s a multiple of 512' )
    for pos in range(0 , len(bit_string ) , 5_12 ):
        block = bit_string[pos : pos + 5_12]
        block_words = []
        for i in range(0 , 5_12 , 32 ):
            block_words.append(int(to_little_endian(block[i : i + 32] ) , 2 ) )
        yield block_words


def not_aa(i : int ) -> int:
    if i < 0:
        raise ValueError('Input must be non-negative' )
    i_str = format(i , '032b' )
    new_str = ''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str , 2 )


def sum_aa(a : int , b : int ) -> int:
    return (a + b) % 2**32


def left_rotate_aa(i : int , shift : int ) -> int:
    if i < 0:
        raise ValueError('Input must be non-negative' )
    if shift < 0:
        raise ValueError('Shift must be non-negative' )
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32


def md5_me(message : bytes ) -> bytes:
    bit_string = preprocess(message )
    added_consts = [int(2**32 * abs(sin(i + 1 ) ) ) for i in range(64 )]
    # Starting states
    aa = 0x67_45_23_01
    ba = 0xef_cd_ab_89
    ca = 0x98_ba_dc_fe
    da = 0x10_32_54_76
    shift_amounts = [
        7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22, 7, 12, 17, 22,
        5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20, 5, 9, 14, 20,
        4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23, 4, 11, 16, 23,
        6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21, 6, 10, 15, 21,
    ]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string ):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64 ):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d ))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b , left_rotate_aa(f , shift_amounts[i] ) )
        # Add hashed chunk to running total
        aa = sum_aa(aa , a )
        ba = sum_aa(ba , b )
        ca = sum_aa(ca , c )
        da = sum_aa(da , d )
    digest = reformat_hex(aa ) + reformat_hex(ba ) + reformat_hex(ca ) + reformat_hex(da )
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
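    # Hedged sanity check (not in the original file): the pure-Python digest
    # above should agree with hashlib's reference MD5 implementation.
    import hashlib

    for _msg in (b"" , b"The quick brown fox jumps over the lazy dog" ):
        assert md5_me(_msg ) == hashlib.md5(_msg ).hexdigest().encode("utf-8" )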
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
__snake_case = TypeVar("""T""")
def A_ ( SCREAMING_SNAKE_CASE_ ) ->int:
return (position - 1) // 2
def A_ ( SCREAMING_SNAKE_CASE_ ) ->int:
return (2 * position) + 1
def A_ ( SCREAMING_SNAKE_CASE_ ) ->int:
return (2 * position) + 2
class _a ( Generic[T] ):
"""simple docstring"""
def __init__( self : str ):
'''simple docstring'''
lowercase_ = []
lowercase_ = {}
lowercase_ = 0
def __len__( self : Any ):
'''simple docstring'''
return self.elements
def __repr__( self : List[Any] ):
'''simple docstring'''
return str(self.heap )
def lowerCamelCase__ ( self : Dict ):
'''simple docstring'''
return self.elements == 0
def lowerCamelCase__ ( self : Any , lowercase_ : Optional[int] , lowercase_ : Any ):
'''simple docstring'''
self.heap.append((elem, weight) )
lowercase_ = self.elements
self.elements += 1
self._bubble_up(SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
lowercase_ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
lowercase_ = self.heap[0]
self._bubble_down(SCREAMING_SNAKE_CASE_ )
return elem
def lowerCamelCase__ ( self : Dict , lowercase_ : Dict , lowercase_ : Optional[Any] ):
'''simple docstring'''
lowercase_ = self.position_map[elem]
lowercase_ = (elem, weight)
if position > 0:
lowercase_ = get_parent_position(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(SCREAMING_SNAKE_CASE_ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE_ )
else:
self._bubble_down(SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( self : Optional[int] , lowercase_ : List[Any] ):
'''simple docstring'''
lowercase_ = self.position_map[elem]
if curr_pos == 0:
return None
lowercase_ = get_parent_position(SCREAMING_SNAKE_CASE_ )
lowercase_ = self.heap[curr_pos]
lowercase_ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self._bubble_up(SCREAMING_SNAKE_CASE_ )
return None
def lowerCamelCase__ ( self : str , lowercase_ : Dict ):
'''simple docstring'''
lowercase_ = self.position_map[elem]
lowercase_ = self.heap[curr_pos]
lowercase_ = get_child_left_position(SCREAMING_SNAKE_CASE_ )
lowercase_ = get_child_right_position(SCREAMING_SNAKE_CASE_ )
if child_left_position < self.elements and child_right_position < self.elements:
lowercase_ = self.heap[child_left_position]
lowercase_ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self._bubble_down(SCREAMING_SNAKE_CASE_ )
if child_left_position < self.elements:
lowercase_ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self._bubble_down(SCREAMING_SNAKE_CASE_ )
else:
return None
if child_right_position < self.elements:
lowercase_ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return self._bubble_down(SCREAMING_SNAKE_CASE_ )
return None
def lowerCamelCase__ ( self : int , lowercase_ : Any , lowercase_ : int ):
'''simple docstring'''
lowercase_ = self.heap[nodea_pos][0]
lowercase_ = self.heap[nodea_pos][0]
lowercase_ = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
lowercase_ = nodea_pos
lowercase_ = nodea_pos
class _a ( Generic[T] ):
"""simple docstring"""
def __init__( self : str ):
'''simple docstring'''
lowercase_ = {}
lowercase_ = 0
def __repr__( self : str ):
'''simple docstring'''
return str(self.connections )
def __len__( self : Tuple ):
'''simple docstring'''
return self.nodes
def lowerCamelCase__ ( self : int , lowercase_ : Dict ):
'''simple docstring'''
if node not in self.connections:
lowercase_ = {}
self.nodes += 1
def lowerCamelCase__ ( self : List[Any] , lowercase_ : Dict , lowercase_ : str , lowercase_ : Union[str, Any] ):
'''simple docstring'''
self.add_node(SCREAMING_SNAKE_CASE_ )
self.add_node(SCREAMING_SNAKE_CASE_ )
lowercase_ = weight
lowercase_ = weight
def A_ ( SCREAMING_SNAKE_CASE_ , ) ->tuple[dict[T, int], dict[T, T | None]]:
lowercase_ = {node: maxsize for node in graph.connections}
lowercase_ = {node: None for node in graph.connections}
lowercase_ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(UpperCAmelCase_ , UpperCAmelCase_ )
if priority_queue.is_empty():
return dist, parent
# initialization
lowercase_ = priority_queue.extract_min()
lowercase_ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowercase_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(UpperCAmelCase_ , dist[neighbour] )
lowercase_ = node
# running prim's algorithm
while not priority_queue.is_empty():
lowercase_ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowercase_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(UpperCAmelCase_ , dist[neighbour] )
lowercase_ = node
return dist, parent
| 451 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Dict = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class UpperCAmelCase_ (PretrainedConfig ):
"""simple docstring"""
    model_type = 'rwkv'
    attribute_map = {'max_position_embeddings': 'context_length'}
    def __init__( self , vocab_size=5_02_77 , context_length=10_24 , hidden_size=40_96 , num_hidden_layers=32 , attention_hidden_size=None , intermediate_size=None , layer_norm_epsilon=1E-5 , bos_token_id=0 , eos_token_id=0 , rescale_every=6 , tie_word_embeddings=False , use_cache=True , **kwargs , ) -> Optional[Any]:
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
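# Hedged usage sketch: with the defaults above, the derived sizes fall back
# to hidden_size and 4 * hidden_size when left as None.
if __name__ == "__main__":
    demo_cfg = UpperCAmelCase_()
    assert demo_cfg.attention_hidden_size == demo_cfg.hidden_size
    assert demo_cfg.intermediate_size == 4 * demo_cfg.hidden_size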
| 13 | 0 |
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
UpperCamelCase_ = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class _SCREAMING_SNAKE_CASE ( Pipeline ):
def __init__( self : List[Any] , **snake_case_ : Optional[int] ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE_ )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Union[str, Any] , snake_case_ : int , **snake_case_ : Union[str, Any] ):
"""simple docstring"""
return super().__call__(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Any , **snake_case_ : Optional[Any] ):
"""simple docstring"""
A : Dict = {}
if "candidate_labels" in kwargs:
A : List[str] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
A : Tuple = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def _UpperCAmelCase ( self : List[Any] , snake_case_ : int , snake_case_ : Optional[int]=None , snake_case_ : Union[str, Any]="This is a photo of {}." ):
"""simple docstring"""
A : Union[str, Any] = load_image(SCREAMING_SNAKE_CASE_ )
A : int = self.image_processor(images=[image] , return_tensors=self.framework )
A : Optional[Any] = candidate_labels
A : int = [hypothesis_template.format(SCREAMING_SNAKE_CASE_ ) for x in candidate_labels]
A : Union[str, Any] = self.tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors=self.framework , padding=SCREAMING_SNAKE_CASE_ )
A : List[str] = [text_inputs]
return inputs
def _UpperCAmelCase ( self : List[Any] , snake_case_ : List[Any] ):
"""simple docstring"""
A : Dict = model_inputs.pop('''candidate_labels''' )
A : int = model_inputs.pop('''text_inputs''' )
if isinstance(text_inputs[0] , SCREAMING_SNAKE_CASE_ ):
A : List[Any] = text_inputs[0]
else:
# Batching case.
A : Optional[Any] = text_inputs[0][0]
A : Optional[int] = self.model(**SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
A : int = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def _UpperCAmelCase ( self : Optional[Any] , snake_case_ : List[str] ):
"""simple docstring"""
A : str = model_outputs.pop('''candidate_labels''' )
A : Union[str, Any] = model_outputs['logits'][0]
if self.framework == "pt":
A : List[Any] = logits.softmax(dim=-1 ).squeeze(-1 )
A : Dict = probs.tolist()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
A : List[Any] = [scores]
elif self.framework == "tf":
A : Union[str, Any] = stable_softmax(SCREAMING_SNAKE_CASE_ , axis=-1 )
A : Optional[Any] = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
A : Optional[Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , key=lambda snake_case_ : -x[0] )
]
        return result
| 256 |
'''simple docstring'''
def solution(n : int = 10_00 ) -> int:
    # Project Euler problem 1: sum of every multiple of 3 or 5 below n. The
    # original `elif a % 15 == 0` branch was unreachable (every multiple of
    # 15 is already a multiple of 3), so it has been removed.
    a : int = 3
    result : int = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
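# Hedged cross-check via inclusion-exclusion: the sum of multiples of k below
# n is k * m * (m + 1) // 2 with m = (n - 1) // k, so add the multiples of 3
# and 5 and subtract those of 15, which were counted twice.
def _closed_form(n : int ) -> int:
    def s(k : int ) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return s(3 ) + s(5 ) - s(15 )


assert solution(10_00 ) == _closed_form(10_00 ) == 23_31_68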
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 | 0 |
"""simple docstring"""
def alternative_string_arrange(first_str : str , second_str : str ) -> str:
    """simple docstring"""
    first_str_length : int = len(first_str )
    second_str_length : int = len(second_str )
    # iterate up to the longer length so both strings are fully consumed
    abs_length : int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list : list = []
    for char_count in range(abs_length ):
        if char_count < first_str_length:
            output_list.append(first_str[char_count] )
        if char_count < second_str_length:
            output_list.append(second_str[char_count] )
    return "".join(output_list )
if __name__ == "__main__":
print(alternative_string_arrange("""AB""", """XYZ"""), end=""" """)
| 77 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester :
"""simple docstring"""
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = 'gelu'
    def __init__( self , parent , batch_size=14 , seq_length=7 , is_training=True , use_input_mask=True , use_labels=True , vocab_size=99 , d_model=32 , num_hidden_layers=2 , num_attention_heads=4 , ffn_dim=37 , activation_function="gelu" , activation_dropout=0.1 , attention_dropout=0.1 , max_position_embeddings=5_12 , initializer_range=0.0_2 , ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self ) -> Any:
        return XGLMConfig.from_pretrained('facebook/xglm-564M' )

    def prepare_config_and_inputs(self ) -> Tuple:
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
    def get_config(self ) -> Optional[int]:
        return XGLMConfig(
            vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=True , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=True , )
    def prepare_config_and_inputs_for_common(self ) -> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'head_mask': head_mask,
        }
        return config, inputs_dict
@require_tf
class TFXGLMModelTest (TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self ) -> List[Any]:
        self.model_tester = TFXGLMModelTester(self )
        self.config_tester = ConfigTester(self , config_class=XGLMConfig , n_embd=37 )
    def test_config(self ) -> Dict:
        self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained(self ) -> Optional[int]:
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
    def test_resize_token_embeddings(self ) -> Any:
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest (unittest.TestCase ):
"""simple docstring"""
@slow
    def test_lm_generate_xglm(self , verify_outputs=True ) -> List[str]:
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        input_ids = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.int32 )  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
        # fmt: on
        output_ids = model.generate(input_ids , do_sample=False , num_beams=1 )
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist() , expected_output_ids )
@slow
    def test_xglm_sample(self ) -> int:
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        tf.random.set_seed(0 )
        tokenized = tokenizer('Today is a nice day and' , return_tensors='tf' )
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(':/CPU:0' ):
            output_ids = model.generate(input_ids , do_sample=True , seed=[7, 0] )
        output_str = tokenizer.decode(output_ids[0] , skip_special_tokens=True )
        EXPECTED_OUTPUT_STR = (
            'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
        )
        self.assertEqual(output_str , EXPECTED_OUTPUT_STR )
@slow
    def test_batch_generation(self ) -> int:
        model = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
        tokenizer = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
        tokenizer.padding_side = 'left'
        # use different length sentences to test batching
        sentences = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When',
            'Hello, my dog is a little',
        ]
        inputs = tokenizer(sentences , return_tensors='tf' , padding=True )
        input_ids = inputs['input_ids']
        outputs = model.generate(input_ids=input_ids , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
        inputs_non_padded = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded , max_new_tokens=12 )
        inputs_padded = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
        output_padded = model.generate(input_ids=inputs_padded , max_new_tokens=12 )
        batch_out_sentence = tokenizer.batch_decode(outputs , skip_special_tokens=True )
        non_padded_sentence = tokenizer.decode(output_non_padded[0] , skip_special_tokens=True )
        padded_sentence = tokenizer.decode(output_padded[0] , skip_special_tokens=True )
        expected_output_sentence = [
            'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
            'left-padding, such as in batched generation. The output for the sequence below should be the same '
            'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
            'a single',
            'Hello, my dog is a little bit of a shy one, but he is very friendly',
        ]
        self.assertListEqual(expected_output_sentence , batch_out_sentence )
        self.assertListEqual(expected_output_sentence , [non_padded_sentence, padded_sentence] )
| 13 | 0 |
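A quick aside on the batched-generation test above, which configures left padding: a decoder-only model continues from the final position, so pad tokens must sit before the real tokens. A minimal, dependency-free illustration (token ids are made up):

pad_id = 0
seqs = [[5, 6, 7], [9]]
max_len = max(len(s) for s in seqs)
left_padded = [[pad_id] * (max_len - len(s)) + s for s in seqs]
print(left_padded)  # [[5, 6, 7], [0, 0, 9]] -- real tokens stay end-aligned for generation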
'''simple docstring'''
from __future__ import annotations
__SCREAMING_SNAKE_CASE : Dict = 8.9_88E9 # units = N * m^2 * C^-2
def a_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
A_ = abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if distance < 0:
raise ValueError("Distance cannot be negative" )
if force == 0:
A_ = COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
A_ = abs(UpperCAmelCase_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
A_ = abs(UpperCAmelCase_ ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
A_ = (COULOMBS_CONSTANT * charge_product / abs(UpperCAmelCase_ )) ** 0.5
return {"distance": distance}
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 452 |
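The sample above solves Coulomb's law for whichever quantity is given as zero. A minimal readable sketch of the same logic, with illustrative parameter names (force, charge1, charge2, distance) standing in for the obfuscated ones:

from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # N * m^2 * C^-2

def couloumbs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    # Exactly one of the four quantities must be the unknown (given as 0).
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    charge_product = abs(charge1 * charge2)
    if force == 0:
        return {"force": COULOMBS_CONSTANT * charge_product / (distance**2)}
    if charge1 == 0:
        return {"charge1": abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)}
    if charge2 == 0:
        return {"charge2": abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)}
    return {"distance": (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5}

print(couloumbs_law(0, 3, 5, 2000))  # {'force': 33705.0}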
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
# TODO Update this
A__ : Tuple = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'esm'
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10_26 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , mask_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = vocab_size
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : str = num_hidden_layers
__lowerCamelCase : List[str] = num_attention_heads
__lowerCamelCase : Any = intermediate_size
__lowerCamelCase : Optional[Any] = hidden_dropout_prob
__lowerCamelCase : Tuple = attention_probs_dropout_prob
__lowerCamelCase : Optional[int] = max_position_embeddings
__lowerCamelCase : str = initializer_range
__lowerCamelCase : Optional[int] = layer_norm_eps
__lowerCamelCase : List[str] = position_embedding_type
__lowerCamelCase : int = use_cache
__lowerCamelCase : Optional[Any] = emb_layer_norm_before
__lowerCamelCase : Optional[Any] = token_dropout
__lowerCamelCase : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.' )
__lowerCamelCase : Dict = EsmFoldConfig()
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Optional[int] = EsmFoldConfig(**SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
__lowerCamelCase : List[str] = get_default_vocab_list()
else:
__lowerCamelCase : Optional[Any] = vocab_list
else:
__lowerCamelCase : Dict = None
__lowerCamelCase : Optional[Any] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , SCREAMING_SNAKE_CASE_ ):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
def lowercase_ ( self ) -> Any:
__lowerCamelCase : Any = super().to_dict()
if isinstance(self.esmfold_config , SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : int = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : str = None
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : float = 0
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : int = 1_2_8
lowerCamelCase : "TrunkConfig" = None
def lowercase_ ( self ) -> Any:
if self.trunk is None:
__lowerCamelCase : List[str] = TrunkConfig()
elif isinstance(self.trunk , SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Any = TrunkConfig(**self.trunk )
def lowercase_ ( self ) -> int:
__lowerCamelCase : Optional[int] = asdict(self )
__lowerCamelCase : str = self.trunk.to_dict()
return output
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : int = 4_8
lowerCamelCase : int = 1_0_2_4
lowerCamelCase : int = 1_2_8
lowerCamelCase : int = 3_2
lowerCamelCase : int = 3_2
lowerCamelCase : int = 3_2
lowerCamelCase : float = 0
lowerCamelCase : float = 0
lowerCamelCase : bool = False
lowerCamelCase : int = 4
lowerCamelCase : Optional[int] = 1_2_8
lowerCamelCase : "StructureModuleConfig" = None
def lowercase_ ( self ) -> Optional[int]:
if self.structure_module is None:
__lowerCamelCase : Dict = StructureModuleConfig()
elif isinstance(self.structure_module , SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                f' {self.sequence_state_dim} and {self.sequence_head_width}.' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                f' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
__lowerCamelCase : Tuple = self.sequence_state_dim // self.sequence_head_width
__lowerCamelCase : str = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
if self.dropout >= 0.4:
raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' )
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : List[str] = asdict(self )
__lowerCamelCase : int = self.structure_module.to_dict()
return output
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : int = 3_8_4
lowerCamelCase : int = 1_2_8
lowerCamelCase : int = 1_6
lowerCamelCase : int = 1_2_8
lowerCamelCase : int = 1_2
lowerCamelCase : int = 4
lowerCamelCase : int = 8
lowerCamelCase : float = 0.1
lowerCamelCase : int = 8
lowerCamelCase : int = 1
lowerCamelCase : int = 2
lowerCamelCase : int = 7
lowerCamelCase : int = 1_0
lowerCamelCase : float = 1e-8
lowerCamelCase : float = 1e5
def lowercase_ ( self ) -> Any:
return asdict(self )
def UpperCAmelCase__ ( ) -> Optional[Any]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 13 | 0 |
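The TrunkConfig validation above boils down to one rule: each state dimension must split evenly into attention heads of the configured width. A standalone sketch of that check (the values are the class defaults):

def check_trunk_dims(sequence_state_dim: int = 1024, sequence_head_width: int = 32) -> int:
    if sequence_state_dim % sequence_head_width != 0:
        raise ValueError(
            f"`sequence_state_dim` should be a round multiple of `sequence_head_width`, "
            f"got {sequence_state_dim} and {sequence_head_width}."
        )
    return sequence_state_dim // sequence_head_width  # number of attention heads

assert check_trunk_dims(1024, 32) == 32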
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase = logging.get_logger(__name__)
class UpperCamelCase__ ( _UpperCAmelCase ):
"""simple docstring"""
A__ : Optional[int] = ['input_values', 'padding_mask']
def __init__( self , SCREAMING_SNAKE_CASE__ = 1 , SCREAMING_SNAKE_CASE__ = 24000 , SCREAMING_SNAKE_CASE__ = 0.0 , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ) -> str:
super().__init__(feature_size=SCREAMING_SNAKE_CASE_ , sampling_rate=SCREAMING_SNAKE_CASE_ , padding_value=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
A__ = chunk_length_s
A__ = overlap
@property
def snake_case__ ( self ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def snake_case__ ( self ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
A__ = True
A__ = bool(
isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
A__ = [np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ):
A__ = np.asarray(SCREAMING_SNAKE_CASE_ , dtype=np.floataa )
elif isinstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
A__ = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
A__ = [np.asarray(SCREAMING_SNAKE_CASE_ ).T]
# verify inputs are valid
for idx, example in enumerate(SCREAMING_SNAKE_CASE_ ):
if example.ndim > 2:
raise ValueError(f"""Expected input shape (channels, length) but got shape {example.shape}""" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f"""Expected mono audio but example has {example.shape[-1]} channels""" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f"""Expected stereo audio but example has {example.shape[-1]} channels""" )
A__ = None
A__ = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
A__ = min(array.shape[0] for array in raw_audio )
A__ = int(np.floor(max_length / self.chunk_stride ) )
A__ = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
A__ = max(array.shape[0] for array in raw_audio )
A__ = int(np.ceil(max_length / self.chunk_stride ) )
A__ = (nb_step - 1) * self.chunk_stride + self.chunk_length
A__ = 'max_length'
else:
A__ = input_values
# normal padding on batch
if padded_inputs is None:
A__ = self.pad(
SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , padding=SCREAMING_SNAKE_CASE_ , return_attention_mask=SCREAMING_SNAKE_CASE_ , )
if padding:
A__ = padded_inputs.pop("attention_mask" )
A__ = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
A__ = example[..., None]
input_values.append(example.T )
A__ = input_values
if return_tensors is not None:
A__ = padded_inputs.convert_to_tensors(SCREAMING_SNAKE_CASE_ )
return padded_inputs
| 104 |
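The two properties in the feature extractor above derive sample-level chunking from a duration in seconds and a fractional overlap. The arithmetic in isolation, with assumed illustrative values:

sampling_rate = 24_000
chunk_length_s = 1.0
overlap = 0.25

chunk_length = int(chunk_length_s * sampling_rate)          # 24000 samples per chunk
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 18000 samples between chunk starts
print(chunk_length, chunk_stride)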
'''simple docstring'''
A__ : dict[tuple[int, int, int], int] = {}
def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
__lowerCamelCase : List[Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
__lowerCamelCase : Tuple = _calculate(days - 1 , UpperCAmelCase_ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
__lowerCamelCase : int = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
__lowerCamelCase : List[Any] = _calculate(days - 1 , UpperCAmelCase_ , 0 )
__lowerCamelCase : Optional[int] = state_late + state_absent + state_ontime
__lowerCamelCase : Union[str, Any] = prizestrings
return prizestrings
def UpperCAmelCase__ ( UpperCAmelCase_ : int = 30 ) -> int:
return _calculate(UpperCAmelCase_ , absent=0 , late=0 )
if __name__ == "__main__":
print(solution())
| 13 | 0 |
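The memoized recursion above walks one day at a time; the same count can be cross-checked with an iterative dynamic program over (total absences, consecutive lates) states. A self-contained sketch (the asserted values are the published Project Euler 191 figures):

def count_prize_strings(days: int = 30) -> int:
    state = {(0, 0): 1}  # (absent, late) -> number of valid strings so far
    for _ in range(days):
        new_state = {}
        for (absent, late), n in state.items():
            # on time / absent / late, mirroring the three recursive branches
            for a, l in ((absent, 0), (absent + 1, 0), (absent, late + 1)):
                if a < 2 and l < 3:
                    new_state[(a, l)] = new_state.get((a, l), 0) + n
        state = new_state
    return sum(state.values())

assert count_prize_strings(4) == 43
assert count_prize_strings(30) == 1_918_080_160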
from __future__ import annotations
import os
from collections.abc import Mapping
_SCREAMING_SNAKE_CASE = tuple[int, int]
class SCREAMING_SNAKE_CASE_ :
"""simple docstring"""
def __init__( self :Optional[Any], snake_case :Any, snake_case :Tuple):
"""simple docstring"""
_lowercase =vertices
_lowercase ={
(min(SCREAMING_SNAKE_CASE_), max(SCREAMING_SNAKE_CASE_)): weight for edge, weight in edges.items()
}
def UpperCamelCase__ ( self :List[str], snake_case :Union[str, Any], snake_case :Optional[Any]):
"""simple docstring"""
self.vertices.add(edge[0])
self.vertices.add(edge[1])
_lowercase =weight
def UpperCamelCase__ ( self :List[Any]):
"""simple docstring"""
_lowercase =Graph({min(self.vertices)}, {})
_lowercase =42
_lowercase =42
_lowercase =42
_lowercase =42
while len(subgraph.vertices) < len(self.vertices):
_lowercase =max(self.edges.values()) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
_lowercase =edge
_lowercase =weight
subgraph.add_edge(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_)
return subgraph
def _snake_case (_snake_case : str = "p107_network.txt") -> int:
_lowercase =os.path.abspath(os.path.dirname(UpperCAmelCase_))
_lowercase =os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
_lowercase ={}
_lowercase =42
_lowercase =42
_lowercase =42
with open(UpperCAmelCase_) as f:
_lowercase =f.read().strip().split('\n')
_lowercase =[line.split(',') for line in data]
for edgea in range(1 , len(UpperCAmelCase_)):
for edgea in range(UpperCAmelCase_):
if adjaceny_matrix[edgea][edgea] != "-":
_lowercase =int(adjaceny_matrix[edgea][edgea])
_lowercase =Graph(set(range(len(UpperCAmelCase_))) , UpperCAmelCase_)
_lowercase =graph.prims_algorithm()
_lowercase =sum(graph.edges.values())
_lowercase =sum(subgraph.edges.values())
return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 181 |
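The prims_algorithm method above repeatedly adds the cheapest edge crossing the boundary of the growing tree. The core step as a self-contained function, run on a tiny four-vertex graph:

def prim_total(vertices: set, edges: dict) -> int:
    inside = {min(vertices)}
    total = 0
    while len(inside) < len(vertices):
        # cheapest edge with exactly one endpoint inside the tree
        (u, v), w = min(
            ((e, w) for e, w in edges.items() if (e[0] in inside) ^ (e[1] in inside)),
            key=lambda item: item[1],
        )
        inside.update((u, v))
        total += w
    return total

assert prim_total({0, 1, 2, 3}, {(0, 1): 1, (1, 2): 2, (0, 2): 4, (2, 3): 3}) == 6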
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
A__ : Any = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : str
lowerCamelCase : Optional[str] = None
lowerCamelCase : Optional[Union[str, int]] = None
lowerCamelCase : Optional[Union[str, int]] = None
lowerCamelCase : Optional[Union[str, int]] = None
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = _str_to_version_tuple(self.version_str )
def __repr__( self ) -> Any:
return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def lowercase_ ( self ) -> int:
return self.major, self.minor, self.patch
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return Version(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return other
raise TypeError(f'{other} (type {type(SCREAMING_SNAKE_CASE_ )}) cannot be compared to version.' )
def __eq__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
try:
__lowerCamelCase : Union[str, Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
__lowerCamelCase : List[Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ )
return self.tuple < other.tuple
def __hash__( self ) -> List[str]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ ) -> List[str]:
__lowerCamelCase : str = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def lowercase_ ( self ) -> str:
return self.version_str
def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] ) -> str:
__lowerCamelCase : str = _VERSION_REG.match(UpperCAmelCase_ )
if not res:
raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
return tuple(int(UpperCAmelCase_ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> Dict:
return ".".join(str(UpperCAmelCase_ ) for v in version_tuple )
| 13 | 0 |
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowercase_ = re.compile(R'^(?P<major>\d+)' R'\.(?P<minor>\d+)' R'\.(?P<patch>\d+)$')
@total_ordering
@dataclass
class __lowerCAmelCase :
_a = 42
_a = None
_a = None
_a = None
_a = None
def A__ ( self ) -> List[str]:
'''simple docstring'''
_lowercase =_str_to_version_tuple(self.version_str )
def __repr__( self ) -> Any:
'''simple docstring'''
return F'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
@property
def A__ ( self ) -> int:
'''simple docstring'''
return self.major, self.minor, self.patch
def A__ ( self , lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return Version(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return other
raise TypeError(F'''{other} (type {type(SCREAMING_SNAKE_CASE_ )}) cannot be compared to version.''' )
def __eq__( self , lowerCAmelCase ) -> Optional[Any]:
'''simple docstring'''
try:
_lowercase =self._validate_operand(SCREAMING_SNAKE_CASE_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , lowerCAmelCase ) -> List[Any]:
'''simple docstring'''
_lowercase =self._validate_operand(SCREAMING_SNAKE_CASE_ )
return self.tuple < other.tuple
def __hash__( self ) -> List[str]:
'''simple docstring'''
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def A__ ( cls , lowerCAmelCase ) -> List[str]:
'''simple docstring'''
_lowercase ={f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def A__ ( self ) -> str:
'''simple docstring'''
return self.version_str
def a ( A__ : Union[str, Any] ) -> str:
"""simple docstring"""
_lowercase =_VERSION_REG.match(UpperCAmelCase_ )
if not res:
raise ValueError(F'''Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
return tuple(int(UpperCAmelCase_ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def a ( A__ : List[str] ) -> Dict:
"""simple docstring"""
return ".".join(str(UpperCAmelCase_ ) for v in version_tuple )
| 291 |
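Both Version samples above hinge on the same parse-then-compare idea: a version string becomes a (major, minor, patch) integer tuple, and tuple ordering supplies the comparison operators. A compact sketch with illustrative names:

import re

_VERSION_REG = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")

def str_to_tuple(version_str: str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z.")
    return tuple(int(res.group(g)) for g in ("major", "minor", "patch"))

assert str_to_tuple("1.12.0") == (1, 12, 0)
assert str_to_tuple("1.12.0") < str_to_tuple("2.0.0")  # tuple order drives comparisons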
'''simple docstring'''
import sys
from collections import defaultdict
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> int:
__lowerCamelCase : Any = []
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Any:
return self.node_position[vertex]
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
__lowerCamelCase : Optional[int] = pos
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase : str = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase : Optional[Any] = 2 * start + 1
else:
__lowerCamelCase : int = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase , __lowerCamelCase : Optional[Any] = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase : int = (
heap[start],
positions[start],
)
__lowerCamelCase , __lowerCamelCase : str = temp, tempa
__lowerCamelCase : Dict = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , SCREAMING_SNAKE_CASE_ )
self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
__lowerCamelCase : Any = position[index]
while index != 0:
__lowerCamelCase : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase : Union[str, Any] = heap[parent]
__lowerCamelCase : Any = position[parent]
self.set_position(position[parent] , SCREAMING_SNAKE_CASE_ )
else:
__lowerCamelCase : Tuple = val
__lowerCamelCase : List[str] = temp
self.set_position(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
break
__lowerCamelCase : Tuple = parent
else:
__lowerCamelCase : Union[str, Any] = val
__lowerCamelCase : Tuple = temp
self.set_position(SCREAMING_SNAKE_CASE_ , 0 )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
__lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE_ ) // 2 - 1
for i in range(SCREAMING_SNAKE_CASE_ , -1 , -1 ):
self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
__lowerCamelCase : Any = positions[0]
__lowerCamelCase : Union[str, Any] = sys.maxsize
self.top_to_bottom(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
return temp
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> str:
__lowerCamelCase : List[Any] = Heap()
__lowerCamelCase : Optional[int] = [0] * len(UpperCAmelCase_ )
__lowerCamelCase : str = [-1] * len(UpperCAmelCase_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__lowerCamelCase : List[str] = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCamelCase : Tuple = []
for vertex in range(len(UpperCAmelCase_ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCAmelCase_ )
heap.node_position.append(UpperCAmelCase_ )
__lowerCamelCase : Tuple = []
__lowerCamelCase : Dict = 1
__lowerCamelCase : str = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase : Any = 0
__lowerCamelCase : Any = distance
heap.heapify(UpperCAmelCase_ , UpperCAmelCase_ )
for _ in range(1 , len(UpperCAmelCase_ ) ):
__lowerCamelCase : List[Any] = heap.delete_minimum(UpperCAmelCase_ , UpperCAmelCase_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCAmelCase_ )]
):
__lowerCamelCase : Dict = distance
heap.bottom_to_top(
UpperCAmelCase_ , heap.get_position(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ )
__lowerCamelCase : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A__ : Tuple = int(input("""Enter number of edges: """).strip())
A__ : str = defaultdict(list)
for _ in range(edges_number):
A__ : Optional[int] = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 13 | 0 |
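The __main__ block above reads the graph from stdin; the same driver with a hard-coded graph, for reference (the call is left commented since the sample's function definitions are name-obfuscated):

from collections import defaultdict

adjacency_list = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 4), (2, 3, 3)]:
    adjacency_list[u].append([v, w])
    adjacency_list[v].append([u, w])

# prisms_algorithm(adjacency_list) would return the tree edges,
# e.g. [(0, 1), (1, 2), (2, 3)] for this graph.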
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
lowerCamelCase = random.Random()
def SCREAMING_SNAKE_CASE( __UpperCamelCase , __UpperCamelCase=1.0 , __UpperCamelCase=None , __UpperCamelCase=None ) -> Union[str, Any]:
if rng is None:
a__ : Any = global_rng
a__ : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class _a ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=400 , __UpperCAmelCase=2000 , __UpperCAmelCase=2048 , __UpperCAmelCase=128 , __UpperCAmelCase=1 , __UpperCAmelCase=512 , __UpperCAmelCase=30 , __UpperCAmelCase=4_4100 , ):
"""simple docstring"""
a__ : Dict = parent
a__ : Union[str, Any] = batch_size
a__ : Any = min_seq_length
a__ : List[Any] = max_seq_length
a__ : List[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
a__ : Any = spectrogram_length
a__ : Any = feature_size
a__ : List[str] = num_audio_channels
a__ : Dict = hop_length
a__ : Optional[Any] = chunk_length
a__ : Optional[Any] = sampling_rate
def _A ( self ):
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _A ( self , __UpperCAmelCase=False , __UpperCAmelCase=False ):
"""simple docstring"""
def _flatten(__UpperCAmelCase ):
return list(itertools.chain(*SCREAMING_SNAKE_CASE_ ) )
if equal_length:
a__ : Tuple = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
a__ : Dict = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
a__ : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _a ( _UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
A :Dict = TvltFeatureExtractor
def _A ( self ):
"""simple docstring"""
a__ : Any = TvltFeatureExtractionTester(self )
def _A ( self ):
"""simple docstring"""
a__ : Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "spectrogram_length" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "feature_size" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "num_audio_channels" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "hop_length" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "chunk_length" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , "sampling_rate" ) )
def _A ( self ):
"""simple docstring"""
a__ : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : List[str] = feat_extract_first.save_pretrained(SCREAMING_SNAKE_CASE_ )[0]
check_json_file_has_correct_format(SCREAMING_SNAKE_CASE_ )
a__ : List[Any] = self.feature_extraction_class.from_pretrained(SCREAMING_SNAKE_CASE_ )
a__ : Tuple = feat_extract_first.to_dict()
a__ : Optional[int] = feat_extract_second.to_dict()
a__ : Union[str, Any] = dict_first.pop("mel_filters" )
a__ : List[Any] = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _A ( self ):
"""simple docstring"""
a__ : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
a__ : int = os.path.join(SCREAMING_SNAKE_CASE_ , "feat_extract.json" )
feat_extract_first.to_json_file(SCREAMING_SNAKE_CASE_ )
a__ : Any = self.feature_extraction_class.from_json_file(SCREAMING_SNAKE_CASE_ )
a__ : Dict = feat_extract_first.to_dict()
a__ : Optional[Any] = feat_extract_second.to_dict()
a__ : Union[str, Any] = dict_first.pop("mel_filters" )
a__ : str = dict_second.pop("mel_filters" )
self.assertTrue(np.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _A ( self ):
"""simple docstring"""
a__ : Dict = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
a__ : int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
a__ : Union[str, Any] = [np.asarray(SCREAMING_SNAKE_CASE_ ) for speech_input in speech_inputs]
# Test not batched input
a__ : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
a__ : Any = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors="np" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
a__ : Dict = feature_extractor(
SCREAMING_SNAKE_CASE_ , return_tensors="np" , sampling_rate=4_4100 , mask_audio=SCREAMING_SNAKE_CASE_ ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
a__ : Optional[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
a__ : int = np.asarray(SCREAMING_SNAKE_CASE_ )
a__ : Dict = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors="np" , sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _A ( self , __UpperCAmelCase ):
"""simple docstring"""
a__ : Union[str, Any] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
a__ : Union[str, Any] = ds.sort("id" ).select(range(SCREAMING_SNAKE_CASE_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def _A ( self ):
"""simple docstring"""
a__ : Tuple = self._load_datasamples(1 )
a__ : List[Any] = TvltFeatureExtractor()
a__ : List[str] = feature_extractor(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
a__ : str = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 191 |
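The floats_list helper used throughout the test above builds batches of random floats. An equivalent self-contained version, for reference:

import random

def floats_list(shape, scale=1.0, rng=None):
    # one inner list per batch row, `shape[1]` scaled random values per row
    rng = rng or random.Random()
    return [[rng.random() * scale for _ in range(shape[1])] for _ in range(shape[0])]

batch = floats_list((2, 4))
print(len(batch), len(batch[0]))  # 2 4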
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_00 ) -> int:
__lowerCamelCase : Union[str, Any] = n * (n + 1) * (2 * n + 1) / 6
__lowerCamelCase : Union[str, Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 | 0 |
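A brute-force cross-check of the two closed forms used above (the asserted value is the well-known Project Euler 6 answer for n = 100):

n = 100
square_of_sum = sum(range(1, n + 1)) ** 2             # (n(n+1)/2)^2
sum_of_squares = sum(i * i for i in range(1, n + 1))  # n(n+1)(2n+1)/6
assert square_of_sum - sum_of_squares == 25_164_150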
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE :Optional[int] = {
"""configuration_blip_2""": [
"""BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Blip2Config""",
"""Blip2QFormerConfig""",
"""Blip2VisionConfig""",
],
"""processing_blip_2""": ["""Blip2Processor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE :Dict = [
"""BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Blip2Model""",
"""Blip2QFormerModel""",
"""Blip2PreTrainedModel""",
"""Blip2ForConditionalGeneration""",
"""Blip2VisionModel""",
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE :Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 283 |
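The module above defers heavy imports through a lazy module object. A minimal sketch of the underlying pattern, illustrative only and not the actual transformers _LazyModule implementation:

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported class to the submodule that defines it
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, name):
        # the real import happens only on first attribute access
        if name not in self._class_to_module:
            raise AttributeError(name)
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        return getattr(module, name)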
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Optional[int]:
__lowerCamelCase : Optional[int] = parent
__lowerCamelCase : Dict = batch_size
__lowerCamelCase : int = image_size
__lowerCamelCase : List[str] = patch_size
__lowerCamelCase : Optional[int] = num_channels
__lowerCamelCase : Any = is_training
__lowerCamelCase : Dict = use_labels
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : List[Any] = num_hidden_layers
__lowerCamelCase : Optional[Any] = num_attention_heads
__lowerCamelCase : Dict = intermediate_size
__lowerCamelCase : Union[str, Any] = hidden_act
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : Tuple = attention_probs_dropout_prob
__lowerCamelCase : str = type_sequence_label_size
__lowerCamelCase : List[str] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : str = (image_size // patch_size) ** 2
__lowerCamelCase : Optional[int] = num_patches + 1
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : Optional[int] = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
return config, pixel_values
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
__lowerCamelCase : Union[str, Any] = FlaxViTModel(config=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : str = (self.image_size, self.image_size)
__lowerCamelCase : str = (self.patch_size, self.patch_size)
__lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
__lowerCamelCase : Tuple = self.type_sequence_label_size
__lowerCamelCase : Any = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase : List[str] = 1
__lowerCamelCase : List[Any] = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : int = config_and_inputs
__lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : str = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def lowercase_ ( self ) -> None:
__lowerCamelCase : str = FlaxViTModelTester(self )
__lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def lowercase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : List[str] = [*signature.parameters.keys()]
__lowerCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with self.subTest('JIT Enabled' ):
__lowerCamelCase : Optional[int] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCamelCase : Union[str, Any] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase_ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
__lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained('google/vit-base-patch16-224' )
__lowerCamelCase : Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
| 13 | 0 |
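The JIT test above checks that compiled and eager outputs agree shape-for-shape. The same equivalence on a toy function (jax assumed installed):

import jax
import jax.numpy as jnp

def f(x):
    return (x * 2 + 1).sum()

jitted = jax.jit(f)
x = jnp.ones((3, 4))
assert jitted(x) == f(x)  # 36.0 either way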
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
snake_case_ = """bart"""
snake_case_ = True
@st.cache(allow_output_mutation=UpperCAmelCase_ )
def lowerCamelCase__ ( ) -> Optional[Any]:
if LOAD_DENSE_INDEX:
__snake_case = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''' )
__snake_case = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''' ).to('''cuda:0''' )
__snake_case = qar_model.eval()
else:
__snake_case = (None, None)
if MODEL_TYPE == "bart":
__snake_case = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''' )
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''' ).to('''cuda:0''' )
__snake_case = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''' )
sas_model.load_state_dict(save_dict['''model'''] )
__snake_case = sas_model.eval()
else:
__snake_case = make_qa_sas_model(
model_name='''t5-small''' , from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''' , device='''cuda:0''' )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=UpperCAmelCase_ )
def lowerCamelCase__ ( ) -> int:
if LOAD_DENSE_INDEX:
__snake_case = faiss.StandardGpuResources()
__snake_case = datasets.load_dataset(path='''wiki_snippets''' , name='''wiki40b_en_100_0''' )['train']
__snake_case = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''' , dtype='''float32''' , mode='''r''' , shape=(wikiaab_passages.num_rows, 128) , )
__snake_case = faiss.IndexFlatIP(128 )
__snake_case = faiss.index_cpu_to_gpu(UpperCAmelCase_ , 1 , UpperCAmelCase_ )
wikiaab_gpu_index_flat.add(UpperCAmelCase_ ) # TODO fix for larger GPU
else:
__snake_case = (None, None)
__snake_case = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=UpperCAmelCase_ )
def lowerCamelCase__ ( ) -> Dict:
__snake_case = datasets.load_dataset('''eli5''' , name='''LFQA_reddit''' )
__snake_case = elia['train_eli5']
__snake_case = np.memmap(
'''eli5_questions_reps.dat''' , dtype='''float32''' , mode='''r''' , shape=(elia_train.num_rows, 128) )
__snake_case = faiss.IndexFlatIP(128 )
eli5_train_q_index.add(UpperCAmelCase_ )
return (elia_train, eli5_train_q_index)
snake_case_ = load_indexes()
snake_case_ = load_models()
snake_case_ = load_train_data()
def lowerCamelCase__ ( snake_case_ : int , snake_case_ : Tuple=10 ) -> List[str]:
__snake_case = embed_questions_for_retrieval([question] , UpperCAmelCase_ , UpperCAmelCase_ )
__snake_case = eli5_train_q_index.search(UpperCAmelCase_ , UpperCAmelCase_ )
__snake_case = [elia_train[int(UpperCAmelCase_ )] for i in I[0]]
return nn_examples
def lowerCamelCase__ ( snake_case_ : List[str] , snake_case_ : Tuple="wiki40b" , snake_case_ : Dict="dense" , snake_case_ : int=10 ) -> Any:
if source == "none":
__snake_case = (' <P> '.join(['''''' for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
__snake_case = query_qa_dense_index(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
__snake_case = query_es_index(
UpperCAmelCase_ , UpperCAmelCase_ , index_name='''english_wiki40b_snippets_100w''' , n_results=UpperCAmelCase_ , )
__snake_case = [
(res['article_title'], res['section_title'].strip(), res['score'], res['passage_text']) for res in hit_lst
]
__snake_case = 'question: {} context: {}'.format(UpperCAmelCase_ , UpperCAmelCase_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case_ : None),
} )
def lowerCamelCase__ ( snake_case_ : List[str] , snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any]=64 , snake_case_ : Tuple=256 , snake_case_ : Union[str, Any]=False , snake_case_ : int=2 , snake_case_ : int=0.95 , snake_case_ : Union[str, Any]=0.8 ) -> Optional[int]:
with torch.no_grad():
__snake_case = qa_sas_generate(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , num_answers=1 , num_beams=UpperCAmelCase_ , min_len=UpperCAmelCase_ , max_len=UpperCAmelCase_ , do_sample=UpperCAmelCase_ , temp=UpperCAmelCase_ , top_p=UpperCAmelCase_ , top_k=UpperCAmelCase_ , max_input_length=1024 , device='''cuda:0''' , )[0]
return (answer, support_list)
st.title('Long Form Question Answering with ELI5')
# Start sidebar
snake_case_ = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
snake_case_ = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
snake_case_ = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
snake_case_ = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
snake_case_ = st.sidebar.checkbox('Demo options')
if demo_options:
snake_case_ = st.sidebar.selectbox(
'',
action_list,
index=3,
)
snake_case_ = action_list.index(action_st)
snake_case_ = st.sidebar.selectbox(
'',
['Show full text of passages', 'Show passage section titles'],
index=0,
)
snake_case_ = show_type == """Show full text of passages"""
else:
snake_case_ = 3
snake_case_ = True
snake_case_ = st.sidebar.checkbox('Retrieval options')
if retrieval_options:
snake_case_ = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
 trained using the [ELI5](https://arxiv.org/abs/1907.09190) question-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
snake_case_ = st.sidebar.selectbox('Which Wikipedia format should the model use?', ['wiki40b', 'none'])
snake_case_ = st.sidebar.selectbox('Which Wikipedia indexer should the model use?', ['dense', 'sparse', 'mixed'])
else:
snake_case_ = """wiki40b"""
snake_case_ = """dense"""
snake_case_ = """beam"""
snake_case_ = 2
snake_case_ = 64
snake_case_ = 256
snake_case_ = None
snake_case_ = None
snake_case_ = st.sidebar.checkbox('Generation options')
if generate_options:
snake_case_ = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
 weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can decode with
 **beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
snake_case_ = st.sidebar.selectbox('Would you like to use beam search or sample an answer?', ['beam', 'sampled'])
snake_case_ = st.sidebar.slider(
'Minimum generation length', min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
snake_case_ = st.sidebar.slider(
'Maximum generation length', min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
snake_case_ = st.sidebar.slider('Beam size', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
snake_case_ = st.sidebar.slider(
'Nucleus sampling p', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
snake_case_ = st.sidebar.slider(
'Temperature', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
snake_case_ = None
# start main text
snake_case_ = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
snake_case_ = st.selectbox(
'What would you like to ask? ---- select <MY QUESTION> to enter a new query',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
snake_case_ = st.text_input('Enter your question here:', '')
else:
snake_case_ = question_s
if st.button('Show me!'):
if action in [0, 1, 3]:
if index_type == "mixed":
snake_case_ = make_support(question, source=wiki_source, method='dense', n_results=10)
snake_case_ = make_support(question, source=wiki_source, method='sparse', n_results=10)
snake_case_ = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
snake_case_ = support_list[:10]
snake_case_ = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
snake_case_ = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
snake_case_ = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == 'sampled'),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('### The model generated answer is:')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('--- \n ### The model is drawing information from the following Wikipedia passages:')
for i, res in enumerate(support_list):
snake_case_ = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(' ', '_'))
snake_case_ = res[1].strip()
if sec_titles == "":
snake_case_ = """[{}]({})""".format(res[0], wiki_url)
else:
snake_case_ = sec_titles.split(' & ')
snake_case_ = """ & """.join(
['[{}]({}#{})'.format(sec.strip(), wiki_url, sec.strip().replace(' ', '_')) for sec in sec_list]
)
st.markdown(
'{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'> <span style=\"font-family:arial; font-size:10pt;\">' + res[-1] + '</span>', unsafe_allow_html=True
)
if action in [2, 3]:
snake_case_ = find_nearest_training(question)
snake_case_ = nn_train_list[0]
st.markdown(
'--- \n ### The most similar question in the ELI5 training set was: \n\n {}'.format(train_exple['title'])
)
snake_case_ = [
"""{}. {}""".format(i + 1, ' \n'.join([line.strip() for line in ans.split('\n') if line.strip() != '']))
for i, (ans, sc) in enumerate(zip(train_exple['answers']['text'], train_exple['answers']['score']))
if i == 0 or sc > 2
]
st.markdown('##### Its answers were: \n\n {}'.format('\n'.join(answers_st)))
snake_case_ = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 592 |
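The dense retriever in the app above is a max-inner-product search over 128-dimensional passage embeddings with a flat faiss index. The retrieval step in isolation, with random data standing in for the real embeddings:

import faiss
import numpy as np

reps = np.random.rand(1_000, 128).astype("float32")   # passage embeddings
index = faiss.IndexFlatIP(128)                        # inner-product (MIPS) index
index.add(reps)
query = np.random.rand(1, 128).astype("float32")      # embedded question
scores, ids = index.search(query, 10)
print(ids[0])  # indices of the 10 highest-scoring passages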
'''simple docstring'''
import argparse
A__ : Optional[Any] = """docs/source/_static/js/custom.js"""
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> int:
with open(UpperCAmelCase_ , encoding='utf-8' , newline='\n' ) as f:
__lowerCamelCase : Dict = f.readlines()
__lowerCamelCase : Tuple = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
__lowerCamelCase : Dict = F'const stableVersion = "v{version}"\n'
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += F' "v{version}": "v{version}",\n'
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(UpperCAmelCase_ )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
A__ : Any = parser.parse_args()
update_custom_js(args.version)
| 13 | 0 |
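The script above scans for an anchor line and rewrites it in place. The same idiom in miniature, on illustrative file content:

lines = ['const stableVersion = "v4.27.0"\n', "const versionMapping = {\n", "}\n"]
index = 0
while not lines[index].startswith("const stableVersion ="):
    index += 1
lines[index] = 'const stableVersion = "v4.28.0"\n'
print("".join(lines))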
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class WavaVecaProcessorWithLMTest(unittest.TestCase):
    def setUp(self):
        vocab = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.add_kwargs_tokens_map = {
            "unk_token": "<unk>",
            "bos_token": "<s>",
            "eos_token": "</s>",
        }
        feature_extractor_map = {
            "feature_size": 1,
            "padding_value": 0.0,
            "sampling_rate": 16000,
            "return_attention_mask": False,
            "do_normalize": True,
        }

        self.tmpdirname = tempfile.mkdtemp()
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.feature_extraction_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

        with open(self.feature_extraction_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(feature_extractor_map) + "\n")

        # load decoder from hub
        self.decoder_name = "hf-internal-testing/ngram-beam-search-decoder"

    def get_tokenizer(self, **kwargs_init):
        kwargs = self.add_kwargs_tokens_map.copy()
        kwargs.update(kwargs_init)
        return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname, **kwargs)

    def get_decoder(self, **kwargs):
        return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)
        processor.save_pretrained(self.tmpdirname)
        processor = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname)

        # tokenizer
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, WavaVecaCTCTokenizer)

        # feature extractor
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, WavaVecaFeatureExtractor)

        # decoder
        self.assertEqual(processor.decoder._alphabet.labels, decoder._alphabet.labels)
        self.assertEqual(
            processor.decoder.model_container[decoder._model_key]._unigram_set,
            decoder.model_container[decoder._model_key]._unigram_set,
        )
        self.assertIsInstance(processor.decoder, BeamSearchDecoderCTC)

    def test_save_load_pretrained_additional_features(self):
        processor = WavaVecaProcessorWithLM(
            tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
        )
        processor.save_pretrained(self.tmpdirname)

        # make sure that error is thrown when decoder alphabet doesn't match
        processor = WavaVecaProcessorWithLM.from_pretrained(
            self.tmpdirname, alpha=5.0, beta=3.0, score_boundary=-7.0, unk_score_offset=3
        )

        # decoder
        self.assertEqual(processor.language_model.alpha, 5.0)
        self.assertEqual(processor.language_model.beta, 3.0)
        self.assertEqual(processor.language_model.score_boundary, -7.0)
        self.assertEqual(processor.language_model.unk_score_offset, 3)

    def test_load_decoder_tokenizer_mismatch_content(self):
        tokenizer = self.get_tokenizer()
        # add token to trigger raise
        tokenizer.add_tokens(["xx"])
        with self.assertRaisesRegex(ValueError, "include"):
            WavaVecaProcessorWithLM(
                tokenizer=tokenizer, feature_extractor=self.get_feature_extractor(), decoder=self.get_decoder()
            )

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        input_str = "This is a test string"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def _get_dummy_logits(self, shape=(2, 10, 16), seed=77):
        np.random.seed(seed)
        return np.random.rand(*shape)

    def test_decoder(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits(shape=(10, 16), seed=13)

        decoded_processor = processor.decode(logits)

        decoded_decoder = decoder.decode_beams(logits)[0]

        self.assertEqual(decoded_decoder[0], decoded_processor.text)
        self.assertEqual("</s> <s> </s>", decoded_processor.text)
        self.assertEqual(decoded_decoder[-2], decoded_processor.logit_score)
        self.assertEqual(decoded_decoder[-1], decoded_processor.lm_score)
@parameterized.expand([[None], ['fork'], ['spawn']] )
    def test_decoder_batch(self, pool_context):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
        # otherwise, the LM won't be available to the pool's sub-processes.
        # manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
        if pool_context is None:
            decoded_processor = processor.batch_decode(logits)
        else:
            with get_context(pool_context).Pool() as pool:
                decoded_processor = processor.batch_decode(logits, pool)

        logits_list = list(logits)

        with get_context("fork").Pool() as p:
            decoded_beams = decoder.decode_beams_batch(p, logits_list)

        texts_decoder, logit_scores_decoder, lm_scores_decoder = [], [], []
        for beams in decoded_beams:
            texts_decoder.append(beams[0][0])
            logit_scores_decoder.append(beams[0][-2])
            lm_scores_decoder.append(beams[0][-1])

        self.assertListEqual(texts_decoder, decoded_processor.text)
        self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"], decoded_processor.text)
        self.assertListEqual(logit_scores_decoder, decoded_processor.logit_score)
        self.assertListEqual(lm_scores_decoder, decoded_processor.lm_score)
    def test_decoder_with_params(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        beam_width = 15
        beam_prune_logp = -20.0
        token_min_logp = -4.0

        decoded_processor_out = processor.batch_decode(
            logits, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list, beam_width=beam_width, beam_prune_logp=beam_prune_logp, token_min_logp=token_min_logp,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]
        logit_scores = [d[0][2] for d in decoded_decoder_out]
        lm_scores = [d[0][3] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"], decoded_processor)

        self.assertTrue(np.array_equal(logit_scores, decoded_processor_out.logit_score))
        self.assertTrue(np.allclose([-20.054, -18.447], logit_scores, atol=1e-3))

        self.assertTrue(np.array_equal(lm_scores, decoded_processor_out.lm_score))
        self.assertTrue(np.allclose([-15.554, -13.9474], lm_scores, atol=1e-3))
    def test_decoder_with_params_of_lm(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        logits = self._get_dummy_logits()

        alpha = 2.0
        beta = 5.0
        unk_score_offset = -20.0
        lm_score_boundary = True

        decoded_processor_out = processor.batch_decode(
            logits, alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary,
        )
        decoded_processor = decoded_processor_out.text

        logits_list = list(logits)
        decoder.reset_params(
            alpha=alpha, beta=beta, unk_score_offset=unk_score_offset, lm_score_boundary=lm_score_boundary,
        )

        with get_context("fork").Pool() as pool:
            decoded_decoder_out = decoder.decode_beams_batch(
                pool, logits_list,
            )

        decoded_decoder = [d[0][0] for d in decoded_decoder_out]

        self.assertListEqual(decoded_decoder, decoded_processor)
        self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"], decoded_processor)

        lm_model = processor.decoder.model_container[processor.decoder._model_key]
        self.assertEqual(lm_model.alpha, 2.0)
        self.assertEqual(lm_model.beta, 5.0)
        self.assertEqual(lm_model.unk_score_offset, -20.0)
        self.assertEqual(lm_model.score_boundary, True)
    def test_decoder_download_ignores_files(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        downloaded_decoder_files = os.listdir(path_to_cached_dir)
        expected_decoder_files = ["alphabet.json", "language_model"]

        downloaded_decoder_files.sort()
        expected_decoder_files.sort()

        # test that only decoder relevant files from
        # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
        # are downloaded and none of the rest (e.g. README.md, ...)
        self.assertListEqual(downloaded_decoder_files, expected_decoder_files)
    def test_decoder_local_files(self):
        local_dir = snapshot_download("hf-internal-testing/processor_with_lm")
        processor = WavaVecaProcessorWithLM.from_pretrained(local_dir)

        language_model = processor.decoder.model_container[processor.decoder._model_key]
        path_to_cached_dir = Path(language_model._kenlm_model.path.decode("utf-8")).parent.parent.absolute()

        local_decoder_files = os.listdir(local_dir)
        expected_decoder_files = os.listdir(path_to_cached_dir)

        local_decoder_files.sort()
        expected_decoder_files.sort()

        # test that both decoder form hub and local files in cache are the same
        self.assertListEqual(local_decoder_files, expected_decoder_files)
    def test_processor_from_auto_processor(self):
        processor_wavaveca = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        processor_auto = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm")

        raw_speech = floats_list((3, 1000))

        input_wavaveca = processor_wavaveca(raw_speech, return_tensors="np")
        input_auto = processor_auto(raw_speech, return_tensors="np")

        for key in input_wavaveca.keys():
            self.assertAlmostEqual(input_wavaveca[key].sum(), input_auto[key].sum(), delta=1e-2)

        logits = self._get_dummy_logits()

        decoded_wavaveca = processor_wavaveca.batch_decode(logits)
        decoded_auto = processor_auto.batch_decode(logits)

        self.assertListEqual(decoded_wavaveca.text, decoded_auto.text)
    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        decoder = self.get_decoder()

        processor = WavaVecaProcessorWithLM(tokenizer=tokenizer, feature_extractor=feature_extractor, decoder=decoder)

        self.assertListEqual(
            processor.model_input_names,
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )

    @staticmethod
    def get_from_offsets(offsets, key):
        retrieved_list = [d[key] for d in offsets]
        return retrieved_list
    def test_offsets(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()[0]
        outputs = processor.decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"], "word")), outputs.text)
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"], "end_offset"), [1, 3, 5])
    def test_offsets_batch(self):
        processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
        logits = self._get_dummy_logits()
        outputs = processor.batch_decode(logits, output_word_offsets=True)

        # check Wav2Vec2CTCTokenizerOutput keys for word
        self.assertEqual(len(outputs.keys()), 4)
        self.assertTrue("text" in outputs)
        self.assertTrue("word_offsets" in outputs)
        self.assertTrue(isinstance(outputs, WavaVecaDecoderWithLMOutput))

        self.assertListEqual(
            [" ".join(self.get_from_offsets(o, "word")) for o in outputs["word_offsets"]], outputs.text
        )
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "word"), ["<s>", "<s>", "</s>"])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "start_offset"), [0, 2, 4])
        self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0], "end_offset"), [1, 3, 5])
@slow
@require_torch
@require_torchaudio
    def test_word_time_stamp_integration(self):
        import torch

        ds = load_dataset("common_voice", "en", split="train", streaming=True)
        ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16000))
        ds_iter = iter(ds)
        sample = next(ds_iter)

        processor = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")
        model = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm")

        # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
        input_values = processor(sample["audio"]["array"], return_tensors="pt").input_values

        with torch.no_grad():
            logits = model(input_values).logits.cpu().numpy()

        output = processor.decode(logits[0], output_word_offsets=True)

        time_offset = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
        word_time_stamps = [
            {
                "start_time": d["start_offset"] * time_offset,
                "end_time": d["end_offset"] * time_offset,
                "word": d["word"],
            }
            for d in output["word_offsets"]
        ]

        EXPECTED_TEXT = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"

        # output words
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), EXPECTED_TEXT)
        self.assertEqual(" ".join(self.get_from_offsets(word_time_stamps, "word")), output.text)

        # output times
        start_times = torch.tensor(self.get_from_offsets(word_time_stamps, "start_time"))
        end_times = torch.tensor(self.get_from_offsets(word_time_stamps, "end_time"))

        # fmt: off
        expected_start_tensor = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599])
        expected_end_tensor = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94])
        # fmt: on

        self.assertTrue(torch.allclose(start_times, expected_start_tensor, atol=0.01))
        self.assertTrue(torch.allclose(end_times, expected_end_tensor, atol=0.01))
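# --- Editor's usage sketch (not part of the original test file) ---------------------
# How the processor under test is typically driven outside unittest; the random array
# stands in for real CTC model output of shape (batch, time, vocab).
if __name__ == "__main__":
    example_processor = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm")
    fake_logits = np.random.rand(2, 10, len(example_processor.tokenizer))
    print(example_processor.batch_decode(fake_logits).text)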
| 170 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp


class FlaxUpsample2D(nn.Module):
    """Nearest-neighbour 2x upsampling followed by a 3x3 convolution."""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states, shape=(batch, height * 2, width * 2, channels), method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    """Strided 3x3 convolution that halves the spatial resolution."""

    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        # pad = ((0, 0), (0, 1), (0, 1), (0, 0))  # pad height and width dim
        # hidden_states = jnp.pad(hidden_states, pad_width=pad)
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    """ResNet block with time-step embedding injection, as used in diffusion UNets."""

    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels, kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels, kernel_size=(1, 1), strides=(1, 1), padding="VALID", dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
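# --- Editor's usage sketch (not part of the original module) ------------------------
# Initialising and applying FlaxResnetBlock2D on dummy NHWC activations and a dummy
# time embedding; shapes are illustrative only.
if __name__ == "__main__":
    rng = jax.random.PRNGKey(0)
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    hidden_states = jnp.ones((1, 16, 16, 32))  # (batch, height, width, channels)
    temb = jnp.ones((1, 128))  # time-step embedding
    params = block.init(rng, hidden_states, temb)
    out = block.apply(params, hidden_states, temb)
    print(out.shape)  # (1, 16, 16, 64)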
| 13 | 0 |
'''simple docstring'''
import multiprocessing
import time

from arguments import PretokenizationArguments
from datasets import load_dataset

from transformers import AutoTokenizer, HfArgumentParser


def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
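# --- Editor's sketch (not part of the original script) ------------------------------
# What `tokenize` computes for a single row; "gpt2" is a hypothetical stand-in for
# args.tokenizer_dir.
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")
#   row = {"content": "def add(a, b):\n    return a + b\n"}
#   out = tokenize(row)
#   out["input_ids"]        -> token ids for the file contents
#   out["ratio_char_token"] -> characters per token, a rough compression measure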
| 451 |
'''simple docstring'''
from __future__ import annotations

RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]
        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)
        # put each buckets' contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1
        # move to next
        placement *= RADIX
    return list_of_ints


if __name__ == "__main__":
    import doctest

    doctest.testmod()
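# --- Editor's example ----------------------------------------------------------------
# radix_sort runs in O(d * (n + RADIX)) time, where d is the number of digits of the
# largest value:
#
#   >>> radix_sort([170, 45, 75, 90, 802, 24, 2, 66])
#   [2, 24, 45, 66, 75, 90, 170, 802]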
| 13 | 0 |
from sklearn.metrics import matthews_corrcoef
import datasets
UpperCamelCase_ = """
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
"""
UpperCamelCase_ = """
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results['matthews_correlation'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results['matthews_correlation'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric(\"matthews_correlation\")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results['matthews_correlation'], 2))
-0.25
"""
UpperCamelCase_ = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
| 256 |
'''simple docstring'''
from collections import defaultdict
from math import gcd


def solution(limit: int = 1_500_000) -> int:
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)


if __name__ == "__main__":
    print(f"{solution() = }")
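# --- Editor's cross-check sketch (not part of the original solution) ----------------
# Euclid's formula above enumerates each primitive triple once; a brute-force count
# over all integer right triangles is a handy sanity test for small limits.
def brute_force(limit: int) -> int:
    from math import isqrt

    counts: defaultdict = defaultdict(int)
    for a in range(1, limit // 3 + 1):
        for b in range(a, (limit - a) // 2 + 1):
            c = isqrt(a * a + b * b)
            if c * c == a * a + b * b and a + b + c <= limit:
                counts[a + b + c] += 1
    return sum(1 for v in counts.values() if v == 1)


# brute_force(200) == solution(200) should hold.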
| 13 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class RwkvConfig(PretrainedConfig):
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}

    def __init__(self, vocab_size=50277, context_length=1024, hidden_size=4096, num_hidden_layers=32, attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0, eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs
        )
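# --- Editor's usage sketch (not part of the original module) ------------------------
# Derived defaults when instantiating the config; values are illustrative.
#
#   config = RwkvConfig(hidden_size=512, num_hidden_layers=12)
#   config.attention_hidden_size  # 512  (defaults to hidden_size)
#   config.intermediate_size      # 2048 (defaults to 4 * hidden_size)
#   config.max_position_embeddings  # aliased to context_length via attribute_map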
| 77 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    r"""
    Construct a "fast" RoFormer tokenizer, backed by HuggingFace's *tokenizers* library.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", tokenize_chinese_chars=True, strip_accents=None, **kwargs):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
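# --- Editor's usage sketch (not part of the original module) ------------------------
# Typical round-trip; assumes the `rjieba` package (used by JiebaPreTokenizer) is
# installed and the checkpoint is reachable.
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   ids = tokenizer("今天天气非常好。")["input_ids"]
#   text = tokenizer.decode(ids, skip_special_tokens=True)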
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)

    # If row is equal to the size of the board it means there are a queen in each row in
    # the current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate each column in the row to find all possible results in each row
    for col in range(n):
        # We apply that we learned previously. First we check that in the current board
        # (possible_board) there are not other same value because if there is it means
        # that there are a collision in vertical. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # And we verify if the results of this two formulas not exist in their variables
        # respectively. (diagonal_right_collisions, diagonal_left_collisions)
        #
        # If any or these are True it means there is a collision so we continue to the
        # next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
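# --- Editor's example ----------------------------------------------------------------
# For n = 4 the solver finds exactly two boards, so the run above ends with
# "2 solutions were found.". A quick assertion form of the same check:
#
#   boards: list[list[str]] = []
#   depth_first_search([], [], [], boards, 4)
#   assert len(boards) == 2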
| 452 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast

_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : str = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
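# --- Editor's note (not part of the original module) --------------------------------
# With _LazyModule, importing the package is cheap; heavy backends load only when an
# attribute is first touched, e.g.:
#
#   from transformers.models.mt5 import MT5Config  # no torch/tf/flax import yet
#   from transformers.models.mt5 import MT5Model   # triggers the torch import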
| 13 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def read_txt_into_dict(filename):
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape

        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}"""
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")


def rename_dict(key, value, full_name, weight_type, hf_dict):
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
PARAM_MAPPING = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.wav2vec2.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""")
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCAmelCase_ )
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        idalabel = read_txt_into_dict(dict_path)
        config.id2label = idalabel
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=True, return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)

        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
    args = parser.parse_args()
    is_finetuned = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
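# --- Editor's usage sketch ------------------------------------------------------------
# Invoking the converter with the flags defined above; the script name and paths are
# placeholders.
#
#   python convert_wav2vec2_checkpoint.py \
#       --checkpoint_path /path/to/wav2vec_small_960h.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path ./wav2vec2-base-960h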
| 104 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """
    Applies a warmup schedule on a given learning rate decay schedule.
    """

    def __init__(self, initial_learning_rate: float, decay_schedule_fn: Callable, warmup_steps: int, power: float = 1.0, name: str = None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(
    init_lr: float,
    num_train_steps: int,
    num_warmup_steps: int,
    min_lr_ratio: float = 0.0,
    adam_beta1: float = 0.9,
    adam_beta2: float = 0.999,
    adam_epsilon: float = 1e-8,
    adam_clipnorm: Optional[float] = None,
    adam_global_clipnorm: Optional[float] = None,
    weight_decay_rate: float = 0.0,
    power: float = 1.0,
    include_in_weight_decay: Optional[List[str]] = None,
):
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=init_lr * min_lr_ratio,
        power=power,
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(
            initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps,
        )
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule,
            weight_decay_rate=weight_decay_rate,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
            exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
            include_in_weight_decay=include_in_weight_decay,
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule,
            beta_1=adam_beta1,
            beta_2=adam_beta2,
            epsilon=adam_epsilon,
            clipnorm=adam_clipnorm,
            global_clipnorm=adam_global_clipnorm,
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay applied to selected variables."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp as a custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}
        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients
        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to apply weight decay to `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over several mini-batches before an optimizer step."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
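

# Minimal usage sketch (assumption: added for illustration, not part of the
# original module; the learning rate and step counts below are hypothetical).
if __name__ == "__main__":
    optimizer, lr_schedule = create_optimizer(
        init_lr=5e-5, num_train_steps=10_000, num_warmup_steps=500, weight_decay_rate=0.01
    )
    # The schedule can be inspected independently of the optimizer: it warms up
    # to `init_lr` over the first 500 steps, then decays back towards zero.
    print(float(lr_schedule(0)), float(lr_schedule(500)), float(lr_schedule(10_000)))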
| 13 | 0 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])


class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["sentencepiece"])
| 181 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2", "stage3"], out_indices=[1, 2, 3]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, patch_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices)

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify that an invalid `out_features` value raises a ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))
        hidden_states = outputs.hidden_states
        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)
        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim]
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3
        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )
        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_to_base(self):
        pass
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        batch_size = inputs_dict["pixel_values"].shape[0]
        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()
            outputs = backbone(**inputs_dict)
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
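

# Standalone sketch (assumption: added for illustration, not part of the test
# suite): the tester above also doubles as a tiny config factory, e.g.
#     tester = MaskFormerSwinModelTester(parent=None)
#     config = tester.get_config()  # MaskFormerSwinConfig with the small defaults above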
| 13 | 0 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
lowercase_ = {"""tokenization_byt5""": ["""ByT5Tokenizer"""]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
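
# `_LazyModule` replaces this module in `sys.modules`, so `ByT5Tokenizer` is
# only imported on first attribute access rather than at package import time:
#     from transformers.models.byt5 import ByT5Tokenizer  # triggers the real import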
| 291 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
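
# Example (assumption: illustrative only): callers elsewhere in the library pin
# an optional dependency at use time by passing its key from the deps table:
#     dep_version_check("tokenizers")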
| 13 | 0 |
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
lowerCamelCase = """\
@misc{wu2016googles,
title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
lowerCamelCase = """\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the 'GLEU score'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score's range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
"""
lowerCamelCase = """\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
'google_bleu': google_bleu score
Examples:
Example 1:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.44
Example 2:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results[\"google_bleu\"], 2))
0.61
Example 3:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results[\"google_bleu\"], 2))
0.53
Example 4:
>>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',
... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']
>>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',
... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',
... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']
>>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',
... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',
... 'heed', 'the', 'cat', 'commands']
>>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',
... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',
... 'of', 'the', 'cat']
>>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',
... 'interested', 'in', 'world', 'history']
>>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',
... 'because', 'he', 'read', 'the', 'book']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric(\"google_bleu\")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results[\"google_bleu\"], 2))
0.4
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class GoogleBleu(datasets.Metric):
    def _info(self) -> MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string", id="token"), id="sequence"),
                    "references": datasets.Sequence(
                        datasets.Sequence(datasets.Value("string", id="token"), id="sequence"), id="references"
                    ),
                }
            ),
        )

    def _compute(
        self,
        predictions: List[List[str]],
        references: List[List[List[str]]],
        min_len: int = 1,
        max_len: int = 4,
    ) -> Dict[str, float]:
        return {
            "google_bleu": gleu_score.corpus_gleu(
                list_of_references=references, hypotheses=predictions, min_len=min_len, max_len=max_len
            )
        }
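

# Minimal sketch (assumption: added for illustration): the underlying nltk
# scorer can also be exercised directly with tokenized hypotheses/references.
if __name__ == "__main__":
    hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
    list_of_references = [[["the", "cat", "is", "on", "the", "mat"]]]
    print(gleu_score.corpus_gleu(list_of_references=list_of_references, hypotheses=hypotheses))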
| 191 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
A__ : List[str] = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 13 | 0 |
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration for an MMBT-style multimodal model: wraps an existing text
    config and adds the size of the multimodal projection."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2_0_4_8):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
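

# Minimal usage sketch (assumption: illustrative; `BertConfig` is one example
# of a wrappable text config):
#     mmbt_config = MMBTConfig(BertConfig(), num_labels=2)
#     mmbt_config.num_labels         # 2, alongside every BertConfig attribute
#     mmbt_config.modal_hidden_size  # 2048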
| 283 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 13 | 0 |
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """Solve the linear system via Gaussian elimination with partial pivoting."""
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the matrix and vector into the augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]
        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[1]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """Fit the polynomial of degree len(y_list) - 1 through (1, y_1), (2, y_2), ..."""
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)
        vector[x_val][0] = y_val

    coeffs: Matrix = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func
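

# Worked example (assumption: added for illustration): fitting the first two
# values of n**3, namely 1 and 8, gives the line 7*n - 6, so
# interpolate([1, 8])(3) == 15 while the true cube is 27 -- the "first
# incorrect term" the problem asks for.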
def question_function(variable: int) -> int:
    """The generating function u(n) from the problem statement."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """Sum the first incorrect terms of the optimum polynomials fitted to
    successively longer prefixes of the sequence."""
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]
    ret: int = 0
    poly: Callable[[int], int]
    x_val: int
    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1
        ret += poly(x_val)
    return ret


if __name__ == "__main__":
    print(f"{solution() = }")
| 592 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """A weighted, undirected graph."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        """Add a new edge to the graph."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        """Run Prim's algorithm to find the minimum spanning tree."""
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    """Maximum saving achievable by removing redundant edges while keeping the
    network connected (total weight minus the minimum spanning tree weight)."""
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]
    edge1: int
    edge2: int

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjaceny_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjaceny_matrix)):
        for edge2 in range(edge1):
            if adjaceny_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjaceny_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjaceny_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(f'''{solution() = }''')
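
# Tiny sanity sketch (assumption: added for illustration): for a triangle with
# edge weights 1, 2 and 3, the minimum spanning tree keeps the two cheapest
# edges, so the saving is 3:
#     g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#     assert sum(g.edges.values()) - sum(g.prims_algorithm().edges.values()) == 3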
| 13 | 0 |
def upper(word: str) -> str:
    """Convert a string to ASCII uppercase.

    >>> upper("wow")
    'WOW'
    >>> upper("wh[]32")
    'WH[]32'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 170 |
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    """Converts the given string of 32 bits to little-endian byte order."""
    if len(string_32) != 32:
        raise ValueError("Input must be of length 32")

    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    """Converts the given non-negative integer to little-endian hex bytes."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    """Preprocesses the message: append a 1 bit, pad with zeros, append the length."""
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 5_12 != 4_48:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes) -> Generator[list[int], None, None]:
    """Splits the bit string into 512-char blocks, each yielded as 16 32-bit words."""
    if len(bit_string) % 5_12 != 0:
        raise ValueError("Input must have length that's a multiple of 512")

    for pos in range(0, len(bit_string), 5_12):
        block = bit_string[pos : pos + 5_12]
        block_words = []
        for i in range(0, 5_12, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    """Returns the bitwise NOT of the given 32-bit integer."""
    if i < 0:
        raise ValueError("Input must be non-negative")

    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)
def sum_32(a: int, b: int) -> int:
    """Returns the mod-2**32 sum of the two given integers."""
    return (a + b) % 2**32
def left_rotate_32(i: int, shift: int) -> int:
    """Rotates the bits of a 32-bit integer left by the given amount."""
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    """Returns the 32-char MD5 digest of the given message."""
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67_45_23_01
    b0 = 0xef_cd_ab_89
    c0 = 0x98_ba_dc_fe
    d0 = 0x10_32_54_76

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)     # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)     # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
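
# Quick check (assumption: added for illustration): the digest of the empty
# message is the well-known MD5 constant.
#     assert md5_me(b"") == b"d41d8cd98f00b204e9800998ecf8427e"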
| 13 | 0 |
'''simple docstring'''
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / """src"""
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
__snake_case = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
__snake_case = """zero2"""
__snake_case = """zero3"""
__snake_case = [ZEROa, ZEROa]
def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("""_""".join(str(x) for x in param.args))
    return f"""{func.__name__}_{param_based_name}"""


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class _a ( _UpperCAmelCase ):
"""simple docstring"""
@parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( self : Any , lowercase_ : Tuple , lowercase_ : int ):
'''simple docstring'''
self.run_and_check(
stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
@require_torch_multi_gpu
@parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( self : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] ):
'''simple docstring'''
self.run_and_check(
stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
@parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : List[Any] ):
'''simple docstring'''
self.run_and_check(
stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
@require_torch_multi_gpu
@parameterized.expand(SCREAMING_SNAKE_CASE_ , name_func=SCREAMING_SNAKE_CASE_ )
def lowerCamelCase__ ( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] ):
'''simple docstring'''
self.run_and_check(
stage=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
def lowerCamelCase__ ( self : List[str] , lowercase_ : Union[str, Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self : List[Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : int = 10 , lowercase_ : List[str] = True , lowercase_ : int = True , lowercase_ : List[Any] = True , ):
'''simple docstring'''
lowercase_ = models[model]
lowercase_ = self.run_trainer(
stage=SCREAMING_SNAKE_CASE_ , model_name=SCREAMING_SNAKE_CASE_ , eval_steps=SCREAMING_SNAKE_CASE_ , num_train_epochs=1 , distributed=SCREAMING_SNAKE_CASE_ , fpaa=SCREAMING_SNAKE_CASE_ , )
self.do_checks(SCREAMING_SNAKE_CASE_ )
return output_dir
def lowerCamelCase__ ( self : Any , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : int = 10 , lowercase_ : List[Any] = 1 , lowercase_ : int = True , lowercase_ : Optional[int] = True , ):
'''simple docstring'''
lowercase_ = self.get_auto_remove_tmp_dir("""./xxx""" , after=SCREAMING_SNAKE_CASE_ )
lowercase_ = F"""\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(SCREAMING_SNAKE_CASE_ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n """.split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
lowercase_ = F"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
lowercase_ = [F"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
lowercase_ = self.get_launcher(SCREAMING_SNAKE_CASE_ )
lowercase_ = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(SCREAMING_SNAKE_CASE_ , env=self.get_env() )
return output_dir
def lowerCamelCase__ ( self : Optional[Any] , lowercase_ : Dict=False ):
'''simple docstring'''
lowercase_ = min(2 , get_gpu_count() ) if distributed else 1
return F"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split()
| 451 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Dict = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = 'rwkv'
lowerCamelCase : Any = {'max_position_embeddings': 'context_length'}
def __init__( self , SCREAMING_SNAKE_CASE_=5_02_77 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
__lowerCamelCase : Optional[int] = vocab_size
__lowerCamelCase : Tuple = context_length
__lowerCamelCase : str = hidden_size
__lowerCamelCase : List[str] = num_hidden_layers
__lowerCamelCase : Any = attention_hidden_size if attention_hidden_size is not None else hidden_size
__lowerCamelCase : Optional[int] = intermediate_size if intermediate_size is not None else 4 * hidden_size
__lowerCamelCase : Optional[Any] = layer_norm_epsilon
__lowerCamelCase : int = rescale_every
__lowerCamelCase : Tuple = use_cache
__lowerCamelCase : int = bos_token_id
__lowerCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
| 13 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class _SCREAMING_SNAKE_CASE ( _UpperCAmelCase ):
lowerCamelCase_ = 'deta'
lowerCamelCase_ = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[Any] , snake_case_ : Optional[Any]=None , snake_case_ : int=900 , snake_case_ : List[Any]=2048 , snake_case_ : int=6 , snake_case_ : Dict=2048 , snake_case_ : str=8 , snake_case_ : Optional[Any]=6 , snake_case_ : str=1024 , snake_case_ : int=8 , snake_case_ : Optional[int]=0.0 , snake_case_ : List[Any]=True , snake_case_ : Union[str, Any]="relu" , snake_case_ : Dict=256 , snake_case_ : Tuple=0.1 , snake_case_ : Union[str, Any]=0.0 , snake_case_ : Tuple=0.0 , snake_case_ : str=0.02 , snake_case_ : Union[str, Any]=1.0 , snake_case_ : Tuple=True , snake_case_ : str=False , snake_case_ : Union[str, Any]="sine" , snake_case_ : str=5 , snake_case_ : Tuple=4 , snake_case_ : Optional[Any]=4 , snake_case_ : int=True , snake_case_ : List[Any]=300 , snake_case_ : Optional[int]=True , snake_case_ : List[str]=True , snake_case_ : str=1 , snake_case_ : List[Any]=5 , snake_case_ : Optional[Any]=2 , snake_case_ : Optional[Any]=1 , snake_case_ : Optional[Any]=1 , snake_case_ : List[Any]=5 , snake_case_ : Optional[Any]=2 , snake_case_ : int=0.1 , snake_case_ : Optional[Any]=0.25 , **snake_case_ : Any , ):
"""simple docstring"""
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
A : Any = CONFIG_MAPPING['resnet'](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
            if isinstance(backbone_config , dict ):
A : Dict = backbone_config.pop('''model_type''' )
A : List[str] = CONFIG_MAPPING[backbone_model_type]
A : int = config_class.from_dict(SCREAMING_SNAKE_CASE_ )
A : str = backbone_config
A : Dict = num_queries
A : Dict = max_position_embeddings
A : Tuple = d_model
A : Optional[int] = encoder_ffn_dim
A : Dict = encoder_layers
A : Optional[int] = encoder_attention_heads
A : str = decoder_ffn_dim
A : int = decoder_layers
A : int = decoder_attention_heads
A : List[str] = dropout
A : List[Any] = attention_dropout
A : Union[str, Any] = activation_dropout
A : str = activation_function
A : Optional[Any] = init_std
A : Optional[Any] = init_xavier_std
A : Optional[int] = encoder_layerdrop
A : str = auxiliary_loss
A : Union[str, Any] = position_embedding_type
# deformable attributes
A : int = num_feature_levels
A : List[str] = encoder_n_points
A : Optional[Any] = decoder_n_points
A : Tuple = two_stage
A : List[str] = two_stage_num_proposals
A : Optional[Any] = with_box_refine
A : str = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
A : List[Any] = class_cost
A : str = bbox_cost
A : Optional[Any] = giou_cost
# Loss coefficients
A : Optional[int] = mask_loss_coefficient
A : Any = dice_loss_coefficient
A : Optional[int] = bbox_loss_coefficient
A : str = giou_loss_coefficient
A : Tuple = eos_coefficient
A : str = focal_alpha
super().__init__(is_encoder_decoder=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
@property
def _UpperCAmelCase ( self : Any ):
"""simple docstring"""
return self.encoder_attention_heads
@property
def _UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
return self.d_model
def _UpperCAmelCase ( self : int ):
"""simple docstring"""
A : List[str] = copy.deepcopy(self.__dict__ )
A : Optional[Any] = self.backbone_config.to_dict()
A : Optional[int] = self.__class__.model_type
        return output
| 256 |
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase_ : int = 10_00 ) -> int:
__lowerCamelCase : Union[str, Any] = 3
__lowerCamelCase : Dict = 0
    while a < n:
        # a multiple of 15 already satisfies a % 3 == 0, so this single branch
        # counts every qualifying number exactly once
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
return result
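# An equivalent closed-form sketch (an addition, not from the original): sum the
# arithmetic series of multiples directly and apply inclusion-exclusion so that
# multiples of 15 are counted exactly once.
def solution_closed_form(n: int = 1000) -> int:
    def series_sum(k: int) -> int:
        m = (n - 1) // k  # number of multiples of k strictly below n
        return k * m * (m + 1) // 2
    return series_sum(3) + series_sum(5) - series_sum(15)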
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 | 0 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
A = 16
A = 32
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase = 16 ) -> str:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
__UpperCAmelCase : Any = load_dataset("glue" , "mrpc" )
def tokenize_function(UpperCamelCase ):
# max_length=None => use the model max length (it's actually the default)
__UpperCAmelCase : Optional[Any] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__UpperCAmelCase : Union[str, Any] = datasets.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__UpperCAmelCase : Tuple = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(UpperCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__UpperCAmelCase : Dict = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__UpperCAmelCase : List[Any] = 16
elif accelerator.mixed_precision != "no":
__UpperCAmelCase : Optional[int] = 8
else:
__UpperCAmelCase : Any = None
return tokenizer.pad(
UpperCAmelCase_ , padding="longest" , max_length=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_tensors="pt" , )
# Instantiate dataloaders.
__UpperCAmelCase : Optional[int] = DataLoader(
tokenized_datasets["train"] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
__UpperCAmelCase : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
return train_dataloader, eval_dataloader
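# Usage sketch (matching the call made inside the training function further
# below):
#   train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
# The loaders are then wrapped by accelerator.prepare(...) together with the
# model, optimizer and scheduler.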
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
A = mocked_dataloaders # noqa: F811
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase ) -> Union[str, Any]:
"""simple docstring"""
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , UpperCAmelCase_ ) == "1":
__UpperCAmelCase : Tuple = 2
# Initialize Accelerator
# New Code #
# We pass in "all" to `log_with` to grab all available trackers in the environment
# Note: If using a custom `Tracker` class, should be passed in here such as:
# >>> log_with = ["all", MyCustomTrackerClassInstance()]
if args.with_tracking:
__UpperCAmelCase : str = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
__UpperCAmelCase : int = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__UpperCAmelCase : Optional[Any] = config['lr']
__UpperCAmelCase : int = int(config["num_epochs"] )
__UpperCAmelCase : Union[str, Any] = int(config["seed"] )
__UpperCAmelCase : List[str] = int(config["batch_size"] )
set_seed(UpperCAmelCase_ )
__UpperCAmelCase : Optional[Any] = get_dataloaders(UpperCAmelCase_ , UpperCAmelCase_ )
__UpperCAmelCase : int = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
__UpperCAmelCase : Optional[int] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__UpperCAmelCase : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
__UpperCAmelCase : Optional[int] = MAX_GPU_BATCH_SIZE
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__UpperCAmelCase : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=UpperCAmelCase_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__UpperCAmelCase : Dict = model.to(accelerator.device )
# Instantiate optimizer
__UpperCAmelCase : int = AdamW(params=model.parameters() , lr=UpperCAmelCase_ )
# Instantiate scheduler
__UpperCAmelCase : str = get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase_ , num_warmup_steps=100 , num_training_steps=(len(UpperCAmelCase_ ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__UpperCAmelCase : Dict = accelerator.prepare(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# New Code #
# We need to initialize the trackers we use. Overall configurations can also be stored
if args.with_tracking:
__UpperCAmelCase : List[str] = os.path.split(UpperCAmelCase_ )[-1].split("." )[0]
accelerator.init_trackers(UpperCAmelCase_ , UpperCAmelCase_ )
# Now we train the model
for epoch in range(UpperCAmelCase_ ):
model.train()
# New Code #
# For our tracking example, we will log the total loss of each epoch
if args.with_tracking:
__UpperCAmelCase : Dict = 0
for step, batch in enumerate(UpperCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__UpperCAmelCase : Union[str, Any] = model(**UpperCAmelCase_ )
__UpperCAmelCase : Dict = outputs.loss
# New Code #
if args.with_tracking:
total_loss += loss.detach().float()
__UpperCAmelCase : Union[str, Any] = loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(UpperCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True` (the default).
batch.to(accelerator.device )
with torch.no_grad():
__UpperCAmelCase : Any = model(**UpperCAmelCase_ )
__UpperCAmelCase : List[Any] = outputs.logits.argmax(dim=-1 )
__UpperCAmelCase : Any = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=UpperCAmelCase_ , references=UpperCAmelCase_ , )
__UpperCAmelCase : Optional[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:" , UpperCAmelCase_ )
# New Code #
# To actually log, we call `Accelerator.log`
# The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
if args.with_tracking:
accelerator.log(
{
"accuracy": eval_metric["accuracy"],
"f1": eval_metric["f1"],
"train_loss": total_loss.item() / len(UpperCAmelCase_ ),
"epoch": epoch,
} , step=UpperCAmelCase_ , )
# New Code #
# When a run is finished, you should call `accelerator.end_training()`
# to close all of the open trackers
if args.with_tracking:
accelerator.end_training()
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=UpperCAmelCase_ , default=UpperCAmelCase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=UpperCAmelCase_ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
__UpperCAmelCase : Union[str, Any] = parser.parse_args()
__UpperCAmelCase : Optional[Any] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
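# Example invocations (assumed, mirroring the Accelerate examples README;
# "tracking_example.py" is a placeholder for whatever name this file is saved
# under):
#   python tracking_example.py --cpu
#   accelerate launch tracking_example.py --with_tracking --mixed_precision fp16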
| 77 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : Dict = XGLMConfig
lowerCamelCase : List[str] = {}
lowerCamelCase : Union[str, Any] = 'gelu'
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=14 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=5_12 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Any:
__lowerCamelCase : int = parent
__lowerCamelCase : Optional[int] = batch_size
__lowerCamelCase : Optional[Any] = seq_length
__lowerCamelCase : Optional[int] = is_training
__lowerCamelCase : str = use_input_mask
__lowerCamelCase : Dict = use_labels
__lowerCamelCase : Union[str, Any] = vocab_size
__lowerCamelCase : List[Any] = d_model
__lowerCamelCase : List[Any] = num_hidden_layers
__lowerCamelCase : List[Any] = num_attention_heads
__lowerCamelCase : Optional[Any] = ffn_dim
__lowerCamelCase : List[Any] = activation_function
__lowerCamelCase : List[Any] = activation_dropout
__lowerCamelCase : List[Any] = attention_dropout
__lowerCamelCase : Union[str, Any] = max_position_embeddings
__lowerCamelCase : Tuple = initializer_range
__lowerCamelCase : int = None
__lowerCamelCase : int = 0
__lowerCamelCase : Tuple = 2
__lowerCamelCase : Tuple = 1
def lowercase_ ( self ) -> Any:
return XGLMConfig.from_pretrained('facebook/xglm-564M' )
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase : Optional[Any] = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
__lowerCamelCase : Optional[int] = None
if self.use_input_mask:
__lowerCamelCase : Any = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase : str = self.get_config()
__lowerCamelCase : List[Any] = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def lowercase_ ( self ) -> Optional[int]:
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=SCREAMING_SNAKE_CASE_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=SCREAMING_SNAKE_CASE_ , )
def lowercase_ ( self ) -> str:
__lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) : str = config_and_inputs
__lowerCamelCase : Union[str, Any] = {
'input_ids': input_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ (_UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
lowerCamelCase : List[Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
lowerCamelCase : Any = (
{'feature-extraction': TFXGLMModel, 'text-generation': TFXGLMForCausalLM} if is_tf_available() else {}
)
lowerCamelCase : List[Any] = False
lowerCamelCase : Dict = False
lowerCamelCase : Union[str, Any] = False
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : str = TFXGLMModelTester(self )
__lowerCamelCase : List[Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , n_embd=37 )
def lowercase_ ( self ) -> Dict:
self.config_tester.run_common_tests()
@slow
def lowercase_ ( self ) -> Optional[int]:
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Optional[Any] = TFXGLMModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='Currently, model embeddings are going to undergo a major refactor.' )
def lowercase_ ( self ) -> Any:
super().test_resize_token_embeddings()
@require_tf
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase_ ( self , SCREAMING_SNAKE_CASE_=True ) -> List[str]:
__lowerCamelCase : Optional[Any] = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__lowerCamelCase : int = tf.convert_to_tensor([[2, 2_68, 98_65]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
__lowerCamelCase : Optional[int] = [2, 2_68, 98_65, 67, 11, 19_88, 5_72_52, 98_65, 5, 9_84, 67, 19_88, 21_38_38, 16_58, 53, 7_04_46, 33, 66_57, 2_78, 15_81]
# fmt: on
__lowerCamelCase : Any = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , SCREAMING_SNAKE_CASE_ )
@slow
def lowercase_ ( self ) -> int:
__lowerCamelCase : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
tf.random.set_seed(0 )
__lowerCamelCase : List[Any] = tokenizer('Today is a nice day and' , return_tensors='tf' )
__lowerCamelCase : int = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(':/CPU:0' ):
__lowerCamelCase : Tuple = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ , seed=[7, 0] )
__lowerCamelCase : Optional[Any] = tokenizer.decode(output_ids[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = (
'Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due'
)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
@slow
def lowercase_ ( self ) -> int:
__lowerCamelCase : Tuple = TFXGLMForCausalLM.from_pretrained('facebook/xglm-564M' )
__lowerCamelCase : Any = XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
__lowerCamelCase : Any = 'left'
# use different length sentences to test batching
__lowerCamelCase : Any = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When',
'Hello, my dog is a little',
]
__lowerCamelCase : Any = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors='tf' , padding=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = inputs['input_ids']
__lowerCamelCase : str = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=inputs['attention_mask'] , max_new_tokens=12 )
__lowerCamelCase : Optional[int] = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
__lowerCamelCase : int = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_new_tokens=12 )
__lowerCamelCase : Optional[Any] = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
__lowerCamelCase : Optional[Any] = model.generate(input_ids=SCREAMING_SNAKE_CASE_ , max_new_tokens=12 )
__lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = [
'This is an extremelly long sentence that only exists to test the ability of the model to cope with '
'left-padding, such as in batched generation. The output for the sequence below should be the same '
'regardless of whether left padding is applied or not. When left padding is applied, the sequence will be '
'a single',
'Hello, my dog is a little bit of a shy one, but he is very friendly',
]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , [non_padded_sentence, padded_sentence] )
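# Why padding_side = "left" above (sketch): decoder-only models append generated
# tokens on the right, so a batch of unequal-length prompts is left-padded, e.g.
#   [<pad>, <pad>, "Hello", ",", "my", "dog", ...]
# keeping every prompt right-aligned. The test then checks that batched, padded
# generation matches the two unpadded single-sequence generations.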
| 13 | 0 |
'''simple docstring'''
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : int = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""}
__SCREAMING_SNAKE_CASE : int = {
"""vocab_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""",
},
"""emoji_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""",
},
}
__SCREAMING_SNAKE_CASE : List[Any] = {
"""abeja/gpt-neox-japanese-2.7b""": 2048,
}
def a_ ( UpperCamelCase_ , UpperCamelCase_ ):
with open(UpperCAmelCase_ , "r" , encoding="utf-8" ) as f:
A_ = json.loads(f.read() )
A_ = collections.OrderedDict()
A_ = collections.OrderedDict()
A_ = collections.OrderedDict()
with open(UpperCAmelCase_ , "r" , encoding="utf-8" ) as f:
A_ = f.readlines()
A_ = [[t.rstrip("\n" )] if (t == ',' or ',' not in t) else t.rstrip("\n" ).split("," ) for t in token]
for idx, b in enumerate(UpperCAmelCase_ ):
A_ = b
A_ = idx
for wd in b:
A_ = idx
return vocab, raw_vocab, ids_to_tokens, emoji
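# Illustrative input shapes (hypothetical file contents, shown only as an
# example) for the loader above:
#   vocab.txt  - one entry per line; comma-separated surface variants share a
#                single token id, e.g. "こんにちは,コンニチハ"
#   emoji.json - a dict carrying "emoji" and "emoji_inv" maps, consumed by the
#                tokenizer class below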
class __lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase : List[str] =VOCAB_FILES_NAMES
_UpperCAmelCase : Tuple =PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Union[str, Any] =['input_ids', 'attention_mask']
def __init__( self : Dict , lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[str]="<|endoftext|>" , lowerCAmelCase : Optional[Any]="<|endoftext|>" , lowerCAmelCase : List[str]="<|startoftext|>" , lowerCAmelCase : str="<|endoftext|>" , lowerCAmelCase : Union[str, Any]=False , **lowerCAmelCase : int , ):
super().__init__(
unk_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , do_clean_text=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
if not os.path.isfile(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F"Can\'t find a vocabulary file at path \'{vocab_file}\'. To load the vocabulary from a Google pretrained"
" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
if not os.path.isfile(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F"Can\'t find a emoji file at path \'{emoji_file}\'. To load the emoji information from a Google"
" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`" )
A_ = do_clean_text
A_ = load_vocab_and_emoji(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A_ = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def _UpperCAmelCase ( self : Union[str, Any] ):
# self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
return len(self.raw_vocab )
def _UpperCAmelCase ( self : Dict ):
return dict(self.raw_vocab , **self.added_tokens_encoder )
def _UpperCAmelCase ( self : str , lowerCAmelCase : Optional[Any] ):
return self.subword_tokenizer.tokenize(SCREAMING_SNAKE_CASE_ , clean=self.do_clean_text )
def _UpperCAmelCase ( self : List[str] , lowerCAmelCase : Union[str, Any] ):
return self.vocab.get(SCREAMING_SNAKE_CASE_ , self.vocab.get(self.unk_token ) )
def _UpperCAmelCase ( self : Any , lowerCAmelCase : Optional[int] ):
return self.subword_tokenizer.convert_id_to_token(SCREAMING_SNAKE_CASE_ )
def _UpperCAmelCase ( self : Any , lowerCAmelCase : int ):
A_ = ''.join(SCREAMING_SNAKE_CASE_ ).strip()
return out_string
def _UpperCAmelCase ( self : Dict , lowerCAmelCase : Tuple ):
A_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ ) + [self.eos_token_id] )
if len(SCREAMING_SNAKE_CASE_ ) > self.model_max_length:
A_ = input_ids[-self.model_max_length :]
return input_ids
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : str = None ):
A_ = 0
if os.path.isdir(SCREAMING_SNAKE_CASE_ ):
A_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
A_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"] )
else:
A_ = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
)
A_ = (
(filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
)
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
F"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
" Please check that the vocabulary is not corrupted!" )
A_ = token_index
writer.write(",".join(SCREAMING_SNAKE_CASE_ ) + "\n" )
index += 1
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as writer:
json.dump(self.emoji , SCREAMING_SNAKE_CASE_ )
return vocab_file, emoji_file
class __lowerCAmelCase ( _UpperCAmelCase ):
"""simple docstring"""
def __init__( self : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : int , lowerCAmelCase : str ):
A_ = vocab # same as swe
A_ = ids_to_tokens # same as bpe
A_ = emoji
A_ = np.max([len(SCREAMING_SNAKE_CASE_ ) for w in self.vocab.keys()] )
A_ = re.compile(r"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)" )
A_ = re.compile(r"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*" )
A_ = re.compile(r"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}" )
A_ = re.compile(
r"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
A_ = re.compile(
r"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*" )
A_ = re.compile(
r"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*" )
A_ = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
A_ = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
A_ = str.maketrans({k: "<BLOCK>" for k in keisen + blocks} )
def __len__( self : List[str] ):
return len(self.ids_to_tokens )
def _UpperCAmelCase ( self : int , lowerCAmelCase : Dict ):
A_ = self.content_repattera.sub("<URL>" , SCREAMING_SNAKE_CASE_ )
A_ = self.content_repattera.sub("<EMAIL>" , SCREAMING_SNAKE_CASE_ )
A_ = self.content_repattera.sub("<TEL>" , SCREAMING_SNAKE_CASE_ )
A_ = self.content_repattera.sub("<DATE>" , SCREAMING_SNAKE_CASE_ )
A_ = self.content_repattera.sub("<DATE>" , SCREAMING_SNAKE_CASE_ )
A_ = self.content_repattera.sub("<PRICE>" , SCREAMING_SNAKE_CASE_ )
A_ = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
A_ = content.replace("<BLOCK><BLOCK>" , "<BLOCK>" )
return content
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any]=False ):
A_ = text.replace(" " , "<SP>" )
A_ = text.replace(" " , "<SP>" )
A_ = text.replace("\r\n" , "<BR>" )
A_ = text.replace("\n" , "<BR>" )
A_ = text.replace("\r" , "<BR>" )
A_ = text.replace("\t" , "<TAB>" )
A_ = text.replace("—" , "ー" )
A_ = text.replace("−" , "ー" )
for k, v in self.emoji["emoji"].items():
if k in text:
A_ = text.replace(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if clean:
A_ = self.clean_text(SCREAMING_SNAKE_CASE_ )
def check_simbol(lowerCAmelCase : Optional[int] ):
A_ = x.encode()
            if len(x ) == 1 and len(e ) == 2:
A_ = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xC2A1 and c <= 0xC2BF)
or (c >= 0xC780 and c <= 0xC783)
or (c >= 0xCAB9 and c <= 0xCBBF)
or (c >= 0xCC80 and c <= 0xCDA2)
):
return True
return False
def checkuae(lowerCAmelCase : Tuple ):
A_ = x.encode()
            if len(x ) == 1 and len(e ) == 3:
A_ = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xE2_8080 and c <= 0xE2_B07F:
return True
return False
A_ = 0
A_ = []
while pos < len(SCREAMING_SNAKE_CASE_ ):
A_ = min(len(SCREAMING_SNAKE_CASE_ ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
A_ = [] # (token_id, token, pos)
for e in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , -1 ):
A_ = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(SCREAMING_SNAKE_CASE_ ) > 2:
A_ = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
# the smallest token_id is adopted
                A_ = sorted(candidates , key=lambda x : x[0] )[0]
result.append(SCREAMING_SNAKE_CASE_ )
A_ = e
else:
A_ = pos + 1
A_ = text[pos:end]
if check_simbol(SCREAMING_SNAKE_CASE_ ):
result.append("<KIGOU>" )
elif checkuae(SCREAMING_SNAKE_CASE_ ):
result.append("<U2000U2BFF>" )
else:
for i in wd.encode("utf-8" ):
result.append("<|byte%d|>" % i )
A_ = end
return result
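    # Byte-fallback example (sketch): a character missing from the vocabulary is
    # emitted as its raw UTF-8 bytes, e.g. "é" -> ["<|byte195|>", "<|byte169|>"];
    # the decoder below collects such tokens and reassembles them with
    # bytearray(...).decode("utf-8").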
def _UpperCAmelCase ( self : Optional[int] , lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any]="\n" ):
A_ = []
A_ = []
A_ = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(SCREAMING_SNAKE_CASE_ ) > 0:
words.append(bytearray(SCREAMING_SNAKE_CASE_ ).decode("utf-8" , errors="replace" ) )
A_ = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["emoji_inv"][word] )
elif word == "<SP>":
words.append(" " )
elif word == "<BR>":
words.append(SCREAMING_SNAKE_CASE_ )
elif word == "<TAB>":
words.append("\t" )
elif word == "<BLOCK>":
words.append("▀" )
elif word == "<KIGOU>":
words.append("ǀ" )
elif word == "<U2000U2BFF>":
words.append("‖" )
else:
words.append(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
words.append(bytearray(SCREAMING_SNAKE_CASE_ ).decode("utf-8" , errors="replace" ) )
A_ = ''.join(SCREAMING_SNAKE_CASE_ )
return text
| 452 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str] = logging.get_logger(__name__)
# TODO Update this
A__ : Tuple = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : Tuple = 'esm'
def __init__( self , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=7_68 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=12 , SCREAMING_SNAKE_CASE_=30_72 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10_26 , SCREAMING_SNAKE_CASE_=0.0_2 , SCREAMING_SNAKE_CASE_=1E-12 , SCREAMING_SNAKE_CASE_="absolute" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , **SCREAMING_SNAKE_CASE_ , ) -> List[str]:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE_ , mask_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = vocab_size
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : str = num_hidden_layers
__lowerCamelCase : List[str] = num_attention_heads
__lowerCamelCase : Any = intermediate_size
__lowerCamelCase : Optional[Any] = hidden_dropout_prob
__lowerCamelCase : Tuple = attention_probs_dropout_prob
__lowerCamelCase : Optional[int] = max_position_embeddings
__lowerCamelCase : str = initializer_range
__lowerCamelCase : Optional[int] = layer_norm_eps
__lowerCamelCase : List[str] = position_embedding_type
__lowerCamelCase : int = use_cache
__lowerCamelCase : Optional[Any] = emb_layer_norm_before
__lowerCamelCase : Optional[Any] = token_dropout
__lowerCamelCase : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.' )
__lowerCamelCase : Dict = EsmFoldConfig()
            elif isinstance(esmfold_config , dict ):
                __lowerCamelCase : Optional[int] = EsmFoldConfig(**esmfold_config )
__lowerCamelCase : List[Any] = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!' )
__lowerCamelCase : List[str] = get_default_vocab_list()
else:
__lowerCamelCase : Optional[Any] = vocab_list
else:
__lowerCamelCase : Dict = None
__lowerCamelCase : Optional[Any] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , SCREAMING_SNAKE_CASE_ ):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!' )
def lowercase_ ( self ) -> Any:
__lowerCamelCase : Any = super().to_dict()
        if isinstance(self.esmfold_config , EsmFoldConfig ):
__lowerCamelCase : int = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : str = None
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : float = 0
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : int = 1_2_8
lowerCamelCase : "TrunkConfig" = None
def lowercase_ ( self ) -> Any:
if self.trunk is None:
__lowerCamelCase : List[str] = TrunkConfig()
        elif isinstance(self.trunk , dict ):
__lowerCamelCase : Any = TrunkConfig(**self.trunk )
def lowercase_ ( self ) -> int:
__lowerCamelCase : Optional[int] = asdict(self )
__lowerCamelCase : str = self.trunk.to_dict()
return output
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : int = 4_8
lowerCamelCase : int = 1_0_2_4
lowerCamelCase : int = 1_2_8
lowerCamelCase : int = 3_2
lowerCamelCase : int = 3_2
lowerCamelCase : int = 3_2
lowerCamelCase : float = 0
lowerCamelCase : float = 0
lowerCamelCase : bool = False
lowerCamelCase : int = 4
lowerCamelCase : Optional[int] = 1_2_8
lowerCamelCase : "StructureModuleConfig" = None
def lowercase_ ( self ) -> Optional[int]:
if self.structure_module is None:
__lowerCamelCase : Dict = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
__lowerCamelCase : Optional[Any] = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                f' {self.sequence_state_dim} and {self.sequence_head_width}.' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                f' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
__lowerCamelCase : Tuple = self.sequence_state_dim // self.sequence_head_width
__lowerCamelCase : str = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
if self.dropout >= 0.4:
raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' )
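    # Numeric check of the constraints above (sketch, using the dataclass
    # defaults): 1024 // 32 = 32 sequence heads with 32 * 32 == 1024, and
    # 128 // 32 = 4 pairwise heads with 4 * 32 == 128 (which is also even),
    # so the default configuration validates.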
def lowercase_ ( self ) -> List[Any]:
__lowerCamelCase : List[str] = asdict(self )
__lowerCamelCase : int = self.structure_module.to_dict()
return output
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : int = 3_8_4
lowerCamelCase : int = 1_2_8
lowerCamelCase : int = 1_6
lowerCamelCase : int = 1_2_8
lowerCamelCase : int = 1_2
lowerCamelCase : int = 4
lowerCamelCase : int = 8
lowerCamelCase : float = 0.1
lowerCamelCase : int = 8
lowerCamelCase : int = 1
lowerCamelCase : int = 2
lowerCamelCase : int = 7
lowerCamelCase : int = 1_0
lowerCamelCase : float = 1e-8
lowerCamelCase : float = 1e5
def lowercase_ ( self ) -> Any:
return asdict(self )
def UpperCAmelCase__ ( ) -> Optional[Any]:
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 13 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
UpperCamelCase = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def _lowerCamelCase ( UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : List[str], UpperCAmelCase_ : str=None ) -> List[Any]:
"""simple docstring"""
if rng is None:
A__ = random.Random()
A__ = 1
for dim in shape:
total_dims *= dim
A__ = []
for _ in range(UpperCAmelCase_ ):
values.append(rng.randint(0, vocab_size - 1 ) )
A__ = np.array(UpperCAmelCase_, dtype=jnp.intaa ).reshape(UpperCAmelCase_ )
return output
def _lowerCamelCase ( UpperCAmelCase_ : str, UpperCAmelCase_ : Optional[int]=None ) -> str:
"""simple docstring"""
A__ = ids_tensor(UpperCAmelCase_, vocab_size=2, rng=UpperCAmelCase_ )
# make sure that at least one token is attended to for each batch
A__ = 1
return attn_mask
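# Behavior sketch (shapes assumed for illustration): ids_tensor((2, 5), vocab_size=99)
# returns a 2x5 integer array of token ids drawn from [0, 98], and
# random_attention_mask((2, 5)) returns a 0/1 mask of the same shape in which one
# position per row is forced to 1 so every row attends to at least one token.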
@require_flax
class UpperCamelCase__ :
"""simple docstring"""
A__ : Union[str, Any] = None
A__ : Optional[Any] = ()
def snake_case__ ( self ) -> Dict:
A__ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
A__ = 2
A__ = inputs['input_ids'].shape[-1] // 2
A__ = inputs['input_ids'][:max_batch_size, :sequence_length]
A__ = jnp.ones_like(SCREAMING_SNAKE_CASE_ )
A__ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
A__ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
A__ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def snake_case__ ( self ) -> List[Any]:
A__ = self._get_input_ids_and_config()
A__ = False
A__ = max_length
A__ = 0
for model_class in self.all_generative_model_classes:
A__ = model_class(SCREAMING_SNAKE_CASE_ )
A__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
A__ = getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
A__ = pt_model_class(SCREAMING_SNAKE_CASE_ ).eval()
A__ = load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE_ , flax_model.params )
A__ = flax_model.generate(SCREAMING_SNAKE_CASE_ ).sequences
A__ = pt_model.generate(torch.tensor(SCREAMING_SNAKE_CASE_ , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
A__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def snake_case__ ( self ) -> str:
A__ = self._get_input_ids_and_config()
A__ = False
A__ = max_length
for model_class in self.all_generative_model_classes:
A__ = model_class(SCREAMING_SNAKE_CASE_ )
A__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
A__ = jit(model.generate )
A__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self ) -> Union[str, Any]:
A__ = self._get_input_ids_and_config()
A__ = True
A__ = max_length
for model_class in self.all_generative_model_classes:
A__ = model_class(SCREAMING_SNAKE_CASE_ )
A__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
A__ = jit(model.generate )
A__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self ) -> List[Any]:
A__ = self._get_input_ids_and_config()
A__ = False
A__ = max_length
A__ = 2
for model_class in self.all_generative_model_classes:
A__ = model_class(SCREAMING_SNAKE_CASE_ )
A__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
A__ = jit(model.generate )
A__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self ) -> Optional[int]:
A__ = self._get_input_ids_and_config()
A__ = False
A__ = max_length
A__ = 2
A__ = 2
for model_class in self.all_generative_model_classes:
A__ = model_class(SCREAMING_SNAKE_CASE_ )
A__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def snake_case__ ( self ) -> Dict:
A__ = self._get_input_ids_and_config()
A__ = True
A__ = max_length
A__ = 0.8
A__ = 10
A__ = 0.3
A__ = 1
A__ = 8
A__ = 9
for model_class in self.all_generative_model_classes:
A__ = model_class(SCREAMING_SNAKE_CASE_ )
A__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
A__ = jit(model.generate )
A__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self ) -> Optional[int]:
A__ = self._get_input_ids_and_config()
A__ = max_length
A__ = 1
A__ = 8
A__ = 9
for model_class in self.all_generative_model_classes:
A__ = model_class(SCREAMING_SNAKE_CASE_ )
A__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
A__ = jit(model.generate )
A__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self ) -> Optional[int]:
A__ = self._get_input_ids_and_config()
A__ = max_length
A__ = 2
A__ = 1
A__ = 8
A__ = 9
for model_class in self.all_generative_model_classes:
A__ = model_class(SCREAMING_SNAKE_CASE_ )
A__ = model.generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
A__ = jit(model.generate )
A__ = jit_generate(SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self ) -> str:
A__ = self._get_input_ids_and_config()
# pad attention mask on the left
A__ = attention_mask.at[(0, 0)].set(0 )
A__ = False
A__ = max_length
for model_class in self.all_generative_model_classes:
A__ = model_class(SCREAMING_SNAKE_CASE_ )
A__ = model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
A__ = jit(model.generate )
A__ = jit_generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self ) -> List[Any]:
A__ = self._get_input_ids_and_config()
# pad attention mask on the left
A__ = attention_mask.at[(0, 0)].set(0 )
A__ = True
A__ = max_length
for model_class in self.all_generative_model_classes:
A__ = model_class(SCREAMING_SNAKE_CASE_ )
A__ = model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
A__ = jit(model.generate )
A__ = jit_generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def snake_case__ ( self ) -> Optional[int]:
A__ = self._get_input_ids_and_config()
# pad attention mask on the left
A__ = attention_mask.at[(0, 0)].set(0 )
A__ = 2
A__ = max_length
for model_class in self.all_generative_model_classes:
A__ = model_class(SCREAMING_SNAKE_CASE_ )
A__ = model.generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertEqual(generation_outputs.shape[-1] , SCREAMING_SNAKE_CASE_ )
A__ = jit(model.generate )
A__ = jit_generate(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def snake_case__ ( self ) -> List[Any]:
A__ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" )
A__ = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" )
A__ = 'Hello world'
A__ = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="np" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , "do_samples" ):
model.generate(SCREAMING_SNAKE_CASE_ , do_samples=SCREAMING_SNAKE_CASE_ )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(SCREAMING_SNAKE_CASE_ , "foo" ):
A__ = {'foo': 'bar'}
model.generate(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
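# The point of the checks above (sketch): Flax generate() validates its keyword
# arguments, so a typo such as do_samples=True raises instead of being silently
# ignored; the intended call is model.generate(input_ids, do_sample=True).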
| 104 |
'''simple docstring'''
A__ : dict[tuple[int, int, int], int] = {}
def UpperCAmelCase__ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ) -> int:
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
__lowerCamelCase : List[Any] = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
__lowerCamelCase : Tuple = _calculate(days - 1 , UpperCAmelCase_ , late + 1 )
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
__lowerCamelCase : int = _calculate(days - 1 , absent + 1 , 0 )
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
__lowerCamelCase : List[Any] = _calculate(days - 1 , UpperCAmelCase_ , 0 )
__lowerCamelCase : Optional[int] = state_late + state_absent + state_ontime
__lowerCamelCase : Union[str, Any] = prizestrings
return prizestrings
def UpperCAmelCase__ ( UpperCAmelCase_ : int = 30 ) -> int:
return _calculate(UpperCAmelCase_ , absent=0 , late=0 )
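# Sanity check (sketch): a 4-day run gives _calculate(4, absent=0, late=0) == 43,
# matching a brute-force enumeration of the 3**4 = 81 attendance strings after
# removing those with two absences or three consecutive late days.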
if __name__ == "__main__":
print(solution())
| 13 | 0 |
import os
import string
import sys
_SCREAMING_SNAKE_CASE = 1 << 8
_SCREAMING_SNAKE_CASE = {
"""tab""": ord("\t"),
"""newline""": ord("\r"),
"""esc""": 27,
"""up""": 65 + ARROW_KEY_FLAG,
"""down""": 66 + ARROW_KEY_FLAG,
"""right""": 67 + ARROW_KEY_FLAG,
"""left""": 68 + ARROW_KEY_FLAG,
"""mod_int""": 91,
"""undefined""": sys.maxsize,
"""interrupt""": 3,
"""insert""": 50,
"""delete""": 51,
"""pg_up""": 53,
"""pg_down""": 54,
}
_SCREAMING_SNAKE_CASE = KEYMAP["""up"""]
_SCREAMING_SNAKE_CASE = KEYMAP["""left"""]
if sys.platform == "win32":
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = {
B"""\xe0H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\x00H""": KEYMAP["""up"""] - ARROW_KEY_FLAG,
B"""\xe0P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\x00P""": KEYMAP["""down"""] - ARROW_KEY_FLAG,
B"""\xe0M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\x00M""": KEYMAP["""right"""] - ARROW_KEY_FLAG,
B"""\xe0K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
B"""\x00K""": KEYMAP["""left"""] - ARROW_KEY_FLAG,
}
for i in range(10):
_SCREAMING_SNAKE_CASE = ord(str(i))
def _snake_case () -> int:
if os.name == "nt":
import msvcrt
_lowercase ='mbcs'
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(UpperCAmelCase_) == 0:
# Read the keystroke
_lowercase =msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_lowercase =ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_lowercase =chr(WIN_KEYMAP[cha])
WIN_CH_BUFFER.append(chr(KEYMAP['mod_int']))
WIN_CH_BUFFER.append(UpperCAmelCase_)
if ord(UpperCAmelCase_) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126))
_lowercase =chr(KEYMAP['esc'])
except KeyError:
_lowercase =cha[1]
else:
_lowercase =ch.decode(UpperCAmelCase_)
else:
_lowercase =WIN_CH_BUFFER.pop(0)
elif os.name == "posix":
import termios
import tty
_lowercase =sys.stdin.fileno()
_lowercase =termios.tcgetattr(UpperCAmelCase_)
try:
tty.setraw(UpperCAmelCase_)
_lowercase =sys.stdin.read(1)
finally:
termios.tcsetattr(UpperCAmelCase_ , termios.TCSADRAIN , UpperCAmelCase_)
return ch
def _snake_case () -> List[str]:
_lowercase =get_raw_chars()
if ord(UpperCAmelCase_) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(UpperCAmelCase_) == KEYMAP["esc"]:
_lowercase =get_raw_chars()
if ord(UpperCAmelCase_) == KEYMAP["mod_int"]:
_lowercase =get_raw_chars()
if ord(UpperCAmelCase_) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(UpperCAmelCase_) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(UpperCAmelCase_) + ARROW_KEY_FLAG)
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 181 |
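# Added sketch of the ARROW_KEY_FLAG trick in the keymap above: arrow keys
# are stored 256 above the raw terminal bytes for A-D (65-68), so they can
# never collide with an ordinary 8-bit character. The helper is ours.
ARROW_KEY_FLAG = 1 << 8

def is_arrow(code: int) -> bool:
    return 65 + ARROW_KEY_FLAG <= code <= 68 + ARROW_KEY_FLAG

assert is_arrow(65 + ARROW_KEY_FLAG)  # "up"
assert not is_arrow(ord("a"))         # plain printable character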
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
A__ : Any = re.compile(R"""^(?P<major>\d+)""" R"""\.(?P<minor>\d+)""" R"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class UpperCAmelCase_ :
"""simple docstring"""
lowerCamelCase : str
lowerCamelCase : Optional[str] = None
lowerCamelCase : Optional[Union[str, int]] = None
lowerCamelCase : Optional[Union[str, int]] = None
lowerCamelCase : Optional[Union[str, int]] = None
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : List[Any] = _str_to_version_tuple(self.version_str )
def __repr__( self ) -> Any:
return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'
@property
def lowercase_ ( self ) -> int:
return self.major, self.minor, self.patch
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return Version(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
return other
raise TypeError(f'{other} (type {type(SCREAMING_SNAKE_CASE_ )}) cannot be compared to version.' )
def __eq__( self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
try:
__lowerCamelCase : Union[str, Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ )
except (TypeError, ValueError):
return False
else:
return self.tuple == other.tuple
def __lt__( self , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
__lowerCamelCase : List[Any] = self._validate_operand(SCREAMING_SNAKE_CASE_ )
return self.tuple < other.tuple
def __hash__( self ) -> List[str]:
return hash(_version_tuple_to_str(self.tuple ) )
@classmethod
def lowercase_ ( cls , SCREAMING_SNAKE_CASE_ ) -> List[str]:
__lowerCamelCase : str = {f.name for f in dataclasses.fields(cls )}
return cls(**{k: v for k, v in dic.items() if k in field_names} )
def lowercase_ ( self ) -> str:
return self.version_str
def UpperCAmelCase__ ( UpperCAmelCase_ : Union[str, Any] ) -> str:
__lowerCamelCase : str = _VERSION_REG.match(UpperCAmelCase_ )
if not res:
raise ValueError(F'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' )
return tuple(int(UpperCAmelCase_ ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def UpperCAmelCase__ ( UpperCAmelCase_ : List[str] ) -> Dict:
return ".".join(str(UpperCAmelCase_ ) for v in version_tuple )
| 13 | 0 |
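# Added sketch: standalone x.y.z parsing and ordering matching the Version
# class above. Parsing yields an integer tuple, and tuple comparison supplies
# the ordering that @total_ordering derives from __lt__.
import re

_VERSION_RE = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")

def parse_version(version_str: str) -> tuple:
    match = _VERSION_RE.match(version_str)
    if not match:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z.")
    return tuple(int(match.group(g)) for g in ("major", "minor", "patch"))

# "1.10.2" sorts after "1.9.9" numerically; naive string comparison disagrees.
assert parse_version("1.10.2") > parse_version("1.9.9")
assert "1.10.2" < "1.9.9"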
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __lowerCAmelCase ( _UpperCAmelCase ):
_a = 'microsoft/speecht5_tts'
_a = (
'This is a tool that reads an English text out loud. It takes an input named `text` which should contain the '
'text to read (in English) and returns a waveform object containing the sound.'
)
_a = 'text_reader'
_a = SpeechTaProcessor
_a = SpeechTaForTextToSpeech
_a = SpeechTaHifiGan
_a = ['text']
_a = ['audio']
def A__ ( self ) -> Dict:
'''simple docstring'''
if self.post_processor is None:
_lowercase ='microsoft/speecht5_hifigan'
super().setup()
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ) -> int:
'''simple docstring'''
_lowercase =self.pre_processor(text=SCREAMING_SNAKE_CASE_ , return_tensors='pt' , truncation=SCREAMING_SNAKE_CASE_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError('Datasets needs to be installed if not passing speaker embeddings.' )
_lowercase =load_dataset('Matthijs/cmu-arctic-xvectors' , split='validation' )
_lowercase =torch.tensor(embeddings_dataset[7_305]['xvector'] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def A__ ( self , lowerCAmelCase ) -> str:
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**SCREAMING_SNAKE_CASE_ )
def A__ ( self , lowerCAmelCase ) -> str:
'''simple docstring'''
with torch.no_grad():
return self.post_processor(SCREAMING_SNAKE_CASE_ ).cpu().detach()
| 291 |
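# Added sketch of the speaker-embedding handling above in isolation: a single
# x-vector is a 1-D tensor that needs a batch dimension before being passed
# to generate_speech. The 512-dim size is an assumption for illustration.
import torch

xvector = torch.randn(512)
speaker_embeddings = xvector.unsqueeze(0)
assert speaker_embeddings.shape == (1, 512)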
'''simple docstring'''
import sys
from collections import defaultdict
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self ) -> int:
__lowerCamelCase : Any = []
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ ) -> Any:
return self.node_position[vertex]
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
__lowerCamelCase : Optional[int] = pos
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase : str = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase : Optional[Any] = 2 * start + 1
else:
__lowerCamelCase : int = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase , __lowerCamelCase : Optional[Any] = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase : int = (
heap[start],
positions[start],
)
__lowerCamelCase , __lowerCamelCase : str = temp, tempa
__lowerCamelCase : Dict = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , SCREAMING_SNAKE_CASE_ )
self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
__lowerCamelCase : Any = position[index]
while index != 0:
__lowerCamelCase : Union[str, Any] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase : Union[str, Any] = heap[parent]
__lowerCamelCase : Any = position[parent]
self.set_position(position[parent] , SCREAMING_SNAKE_CASE_ )
else:
__lowerCamelCase : Tuple = val
__lowerCamelCase : List[str] = temp
self.set_position(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
break
__lowerCamelCase : Tuple = parent
else:
__lowerCamelCase : Union[str, Any] = val
__lowerCamelCase : Tuple = temp
self.set_position(SCREAMING_SNAKE_CASE_ , 0 )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
__lowerCamelCase : Optional[int] = len(SCREAMING_SNAKE_CASE_ ) // 2 - 1
for i in range(SCREAMING_SNAKE_CASE_ , -1 , -1 ):
self.top_to_bottom(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
__lowerCamelCase : Any = positions[0]
__lowerCamelCase : Union[str, Any] = sys.maxsize
self.top_to_bottom(SCREAMING_SNAKE_CASE_ , 0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
return temp
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> str:
__lowerCamelCase : List[Any] = Heap()
__lowerCamelCase : Optional[int] = [0] * len(UpperCAmelCase_ )
__lowerCamelCase : str = [-1] * len(UpperCAmelCase_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__lowerCamelCase : List[str] = [] # Heap of Distance of vertices from their neighboring vertex
__lowerCamelCase : Tuple = []
for vertex in range(len(UpperCAmelCase_ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCAmelCase_ )
heap.node_position.append(UpperCAmelCase_ )
__lowerCamelCase : Tuple = []
__lowerCamelCase : Dict = 1
__lowerCamelCase : str = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase : Any = 0
__lowerCamelCase : Any = distance
heap.heapify(UpperCAmelCase_ , UpperCAmelCase_ )
for _ in range(1 , len(UpperCAmelCase_ ) ):
__lowerCamelCase : List[Any] = heap.delete_minimum(UpperCAmelCase_ , UpperCAmelCase_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase : Union[str, Any] = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCAmelCase_ )]
):
__lowerCamelCase : Dict = distance
heap.bottom_to_top(
UpperCAmelCase_ , heap.get_position(UpperCAmelCase_ ) , UpperCAmelCase_ , UpperCAmelCase_ )
__lowerCamelCase : str = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
A__ : Tuple = int(input("""Enter number of edges: """).strip())
A__ : str = defaultdict(list)
for _ in range(edges_number):
A__ : Optional[int] = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 13 | 0 |
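# Added sketch: a clean, runnable rendition of the Prim's algorithm above,
# using heapq instead of the hand-rolled heap. The graph layout (vertex ->
# list of [neighbour, distance] pairs) matches the adjacency list built in
# __main__; the function and test graph are ours.
import heapq

def prim_mst_weight(adjacency_list: dict, start: int = 0) -> int:
    visited = {start}
    frontier = [(distance, neighbour) for neighbour, distance in adjacency_list[start]]
    heapq.heapify(frontier)
    total = 0
    while frontier and len(visited) < len(adjacency_list):
        distance, vertex = heapq.heappop(frontier)
        if vertex in visited:
            continue
        visited.add(vertex)
        total += distance
        for neighbour, dist in adjacency_list[vertex]:
            if neighbour not in visited:
                heapq.heappush(frontier, (dist, neighbour))
    return total

# Four vertices; the MST picks the three cheapest edges: 1 + 2 + 3 = 6.
graph = {0: [(1, 1), (2, 4)], 1: [(0, 1), (2, 2), (3, 6)], 2: [(0, 4), (1, 2), (3, 3)], 3: [(1, 6), (2, 3)]}
assert prim_mst_weight(graph) == 6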
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class _a ( unittest.TestCase ):
'''simple docstring'''
def _A ( self ):
"""simple docstring"""
a__ : List[Any] = tempfile.mkdtemp()
# fmt: off
a__ : Any = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest']
# fmt: on
a__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a__ : Tuple = {
'do_resize': True,
'size': {'height': 18, 'width': 18},
'do_normalize': True,
'image_mean': [0.5, 0.5, 0.5],
'image_std': [0.5, 0.5, 0.5],
}
a__ : Union[str, Any] = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _A ( self , **__UpperCAmelCase ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _A ( self , **__UpperCAmelCase ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ )
def _A ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _A ( self ):
"""simple docstring"""
a__ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
a__ : List[str] = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _A ( self ):
"""simple docstring"""
a__ : List[str] = self.get_tokenizer()
a__ : Optional[int] = self.get_image_processor()
a__ : Union[str, Any] = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
processor.save_pretrained(self.tmpdirname )
a__ : Tuple = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def _A ( self ):
"""simple docstring"""
a__ : Optional[Any] = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
a__ : Any = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
a__ : Optional[int] = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
a__ : List[str] = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=SCREAMING_SNAKE_CASE_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ )
def _A ( self ):
"""simple docstring"""
a__ : Optional[int] = self.get_image_processor()
a__ : List[str] = self.get_tokenizer()
a__ : int = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
a__ : Any = self.prepare_image_inputs()
a__ : Optional[int] = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors="np" )
a__ : Union[str, Any] = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _A ( self ):
"""simple docstring"""
a__ : Dict = self.get_image_processor()
a__ : str = self.get_tokenizer()
a__ : Any = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
a__ : List[Any] = 'lower newer'
a__ : Optional[int] = processor(text=SCREAMING_SNAKE_CASE_ )
a__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _A ( self ):
"""simple docstring"""
a__ : List[Any] = self.get_image_processor()
a__ : Any = self.get_tokenizer()
a__ : int = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
a__ : int = 'lower newer'
a__ : List[str] = self.prepare_image_inputs()
a__ : Any = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with self.assertRaises(SCREAMING_SNAKE_CASE_ ):
processor()
def _A ( self ):
"""simple docstring"""
a__ : Tuple = self.get_image_processor()
a__ : int = self.get_tokenizer()
a__ : Optional[Any] = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
a__ : int = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
a__ : int = processor.batch_decode(SCREAMING_SNAKE_CASE_ )
a__ : Tuple = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _A ( self ):
"""simple docstring"""
a__ : List[str] = self.get_image_processor()
a__ : List[str] = self.get_tokenizer()
a__ : int = VisionTextDualEncoderProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
a__ : Any = 'lower newer'
a__ : int = self.prepare_image_inputs()
a__ : Any = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 191 |
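# Added sketch of the delegation pattern the processor tests above exercise:
# text is forwarded to the tokenizer, images to the image processor, and the
# outputs are merged. ToyProcessor and both callables are our own stand-ins.
class ToyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        output = {}
        if text is not None:
            output.update(self.tokenizer(text))
        if images is not None:
            output.update(self.image_processor(images))
        return output

def toy_tokenizer(text):
    return {"input_ids": [ord(char) for char in text]}

def toy_image_processor(images):
    return {"pixel_values": images}

processor = ToyProcessor(toy_tokenizer, toy_image_processor)
assert set(processor(text="hi", images=[[0.0]])) == {"input_ids", "pixel_values"}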
'''simple docstring'''
def UpperCAmelCase__ ( UpperCAmelCase_ : int = 1_00 ) -> int:
__lowerCamelCase : Union[str, Any] = n * (n + 1) * (2 * n + 1) / 6
__lowerCamelCase : Union[str, Any] = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 13 | 0 |
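# Added sketch: a direct-summation check of the closed forms used above (the
# square of the n-th triangular number minus the sum of the first n squares).
def brute_force_difference(n: int) -> int:
    square_of_sum = sum(range(1, n + 1)) ** 2
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    return square_of_sum - sum_of_squares

# For the first ten natural numbers the difference is 3025 - 385 = 2640.
assert brute_force_difference(10) == 2640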
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE :List[str] = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
SCREAMING_SNAKE_CASE :List[Any] = 5_00_03
SCREAMING_SNAKE_CASE :int = 5_00_02
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PLBartTokenizer
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = False
def lowerCAmelCase__ ( self : int ) -> Tuple:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = PLBartTokenizer(SCREAMING_SNAKE_CASE_ , language_codes="base" , keep_accents=SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(self.tmpdirname )
def lowerCAmelCase__ ( self : List[str] ) -> Dict:
"""simple docstring"""
snake_case_ = PLBartTokenizer(SCREAMING_SNAKE_CASE_ , language_codes="base" , keep_accents=SCREAMING_SNAKE_CASE_ )
snake_case_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
snake_case_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
snake_case_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
snake_case_ = tokenizer.vocab_size
snake_case_ = [tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) for x in range(end - 4 , SCREAMING_SNAKE_CASE_ )]
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["__java__", "__python__", "__en_XX__", "<mask>"] )
snake_case_ = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
snake_case_ = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(
tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , )
def lowerCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
snake_case_ = PLBartTokenizer(SCREAMING_SNAKE_CASE_ , language_codes="multi" , keep_accents=SCREAMING_SNAKE_CASE_ )
snake_case_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(SCREAMING_SNAKE_CASE_ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
snake_case_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
snake_case_ = tokenizer.convert_tokens_to_ids(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
snake_case_ = tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
snake_case_ = tokenizer.vocab_size
snake_case_ = [tokenizer.convert_ids_to_tokens(SCREAMING_SNAKE_CASE_ ) for x in range(end - 7 , SCREAMING_SNAKE_CASE_ )]
self.assertListEqual(
SCREAMING_SNAKE_CASE_ , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
snake_case_ = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
snake_case_ = tokenizer(SCREAMING_SNAKE_CASE_ ).input_ids
self.assertEqual(
tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , )
@require_torch
@require_sentencepiece
@require_tokenizers
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 'uclanlp/plbart-python-en_XX'
_SCREAMING_SNAKE_CASE = [
'def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])',
'def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])',
]
_SCREAMING_SNAKE_CASE = [
'Returns the maximum value of a b c.',
'Sums the values of a b c.',
]
_SCREAMING_SNAKE_CASE = [
134,
5452,
33460,
33441,
33463,
33465,
33463,
33449,
988,
20,
33456,
19,
33456,
771,
39,
4258,
889,
3318,
33441,
33463,
33465,
33463,
33449,
2471,
2,
PYTHON_CODE,
]
@classmethod
def lowerCAmelCase__ ( cls : Union[str, Any] ) -> str:
"""simple docstring"""
snake_case_ = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
snake_case_ = 1
return cls
def lowerCAmelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def lowerCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
snake_case_ = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
self.assertIn(SCREAMING_SNAKE_CASE_ , self.tokenizer.all_special_ids )
snake_case_ = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
snake_case_ = self.tokenizer.decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
snake_case_ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=SCREAMING_SNAKE_CASE_ )
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertNotIn(self.tokenizer.eos_token , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 2_0]
self.assertIsInstance(src_text[0] , SCREAMING_SNAKE_CASE_ )
snake_case_ = 1_0
snake_case_ = self.tokenizer(SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def lowerCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
snake_case_ = tempfile.mkdtemp()
snake_case_ = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
snake_case_ = PLBartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , SCREAMING_SNAKE_CASE_ )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
snake_case_ = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , return_tensors="pt" )
snake_case_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , SCREAMING_SNAKE_CASE_ )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def lowerCAmelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
snake_case_ = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
snake_case_ = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
snake_case_ = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , SCREAMING_SNAKE_CASE_ )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def lowerCAmelCase__ ( self : Dict ) -> Tuple:
"""simple docstring"""
snake_case_ = self.tokenizer(self.src_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=3 , return_tensors="pt" )
snake_case_ = self.tokenizer(
text_target=self.tgt_text , padding=SCREAMING_SNAKE_CASE_ , truncation=SCREAMING_SNAKE_CASE_ , max_length=1_0 , return_tensors="pt" )
snake_case_ = targets['input_ids']
snake_case_ = shift_tokens_right(SCREAMING_SNAKE_CASE_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def lowerCAmelCase__ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
snake_case_ = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
| 283 |
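# Added sketch: our reading of the MBart/PLBart-style shift_tokens_right used
# in the tests above. The language-code token sits at the last non-pad
# position and is moved to the front to seed the decoder. This is a toy for
# intuition, not the library implementation.
import numpy as np

def toy_shift_tokens_right(input_ids: np.ndarray, pad_token_id: int) -> np.ndarray:
    shifted = input_ids.copy()
    for row in shifted:
        last = int(np.where(row != pad_token_id)[0][-1])
        lang_code = row[last]
        row[1 : last + 1] = row[:last].copy()
        row[0] = lang_code
    return shifted

batch = np.array([[11, 12, 2, 50003, 1]])  # tokens, EOS (2), language code, pad (1)
assert toy_shift_tokens_right(batch, pad_token_id=1).tolist() == [[50003, 11, 12, 2, 1]]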
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class UpperCAmelCase_ (unittest.TestCase ):
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_="gelu" , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=10 , SCREAMING_SNAKE_CASE_=0.0_2 , ) -> Optional[int]:
__lowerCamelCase : Optional[int] = parent
__lowerCamelCase : Dict = batch_size
__lowerCamelCase : int = image_size
__lowerCamelCase : List[str] = patch_size
__lowerCamelCase : Optional[int] = num_channels
__lowerCamelCase : Any = is_training
__lowerCamelCase : Dict = use_labels
__lowerCamelCase : List[Any] = hidden_size
__lowerCamelCase : List[Any] = num_hidden_layers
__lowerCamelCase : Optional[Any] = num_attention_heads
__lowerCamelCase : Dict = intermediate_size
__lowerCamelCase : Union[str, Any] = hidden_act
__lowerCamelCase : Optional[int] = hidden_dropout_prob
__lowerCamelCase : Tuple = attention_probs_dropout_prob
__lowerCamelCase : str = type_sequence_label_size
__lowerCamelCase : List[str] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : str = (image_size // patch_size) ** 2
__lowerCamelCase : Optional[int] = num_patches + 1
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCamelCase : Optional[int] = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , )
return config, pixel_values
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
__lowerCamelCase : Union[str, Any] = FlaxViTModel(config=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = model(SCREAMING_SNAKE_CASE_ )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__lowerCamelCase : str = (self.image_size, self.image_size)
__lowerCamelCase : str = (self.patch_size, self.patch_size)
__lowerCamelCase : Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
__lowerCamelCase : Tuple = self.type_sequence_label_size
__lowerCamelCase : Any = FlaxViTForImageClassification(config=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowerCamelCase : List[str] = 1
__lowerCamelCase : List[Any] = FlaxViTForImageClassification(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : List[Any] = self.prepare_config_and_inputs()
        __lowerCamelCase , __lowerCamelCase : int = config_and_inputs
__lowerCamelCase : Union[str, Any] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class UpperCAmelCase_ (_UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase : str = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def lowercase_ ( self ) -> None:
__lowerCamelCase : str = FlaxViTModelTester(self )
__lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=37 )
def lowercase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase , __lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = model_class(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : List[str] = [*signature.parameters.keys()]
__lowerCamelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__lowerCamelCase : List[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
@jax.jit
def model_jitted(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return model(pixel_values=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
with self.subTest('JIT Enabled' ):
__lowerCamelCase : Optional[int] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
__lowerCamelCase : Union[str, Any] = model_jitted(**SCREAMING_SNAKE_CASE_ ).to_tuple()
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for jitted_output, output in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase_ ( self ) -> List[Any]:
for model_class_name in self.all_model_classes:
__lowerCamelCase : Union[str, Any] = model_class_name.from_pretrained('google/vit-base-patch16-224' )
__lowerCamelCase : Union[str, Any] = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
| 13 | 0 |
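# Added sketch of the sequence-length arithmetic the ViT tester above encodes:
# one token per (patch_size x patch_size) patch plus the prepended [CLS] token.
def vit_sequence_length(image_size: int, patch_size: int) -> int:
    return (image_size // patch_size) ** 2 + 1

assert vit_sequence_length(30, 2) == 226    # the tester defaults: 15 * 15 patches + [CLS]
assert vit_sequence_length(224, 16) == 197  # standard ViT-Base/16 resolution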
import darl # noqa
import gym
import tqdm
from diffusers.experimental import ValueGuidedRLPipeline
snake_case_ = {
"""n_samples""": 64,
"""horizon""": 32,
"""num_inference_steps""": 20,
"""n_guide_steps""": 2, # can set to 0 for faster sampling, does not use value network
"""scale_grad_by_std""": True,
"""scale""": 0.1,
"""eta""": 0.0,
"""t_grad_cutoff""": 2,
"""device""": """cpu""",
}
if __name__ == "__main__":
snake_case_ = """hopper-medium-v2"""
snake_case_ = gym.make(env_name)
snake_case_ = ValueGuidedRLPipeline.from_pretrained(
'bglick13/hopper-medium-v2-value-function-hor32',
env=env,
)
env.seed(0)
snake_case_ = env.reset()
snake_case_ = 0
snake_case_ = 0
snake_case_ = 1000
snake_case_ = [obs.copy()]
try:
for t in tqdm.tqdm(range(T)):
# call the policy
snake_case_ = pipeline(obs, planning_horizon=32)
# execute action in environment
snake_case_ = env.step(denorm_actions)
snake_case_ = env.get_normalized_score(total_reward)
# update return
total_reward += reward
total_score += score
print(
F'Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:'
F' {total_score}'
)
# save observations for rendering
rollout.append(next_observation.copy())
snake_case_ = next_observation
except KeyboardInterrupt:
pass
print(F'Total reward: {total_reward}')
| 592 |
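# Added sketch of the evaluation loop's control flow above, with a trivial
# stand-in environment so the reset/step/accumulate shape runs without gym
# or a trained pipeline. FixedRewardEnv is our own mock, not from diffusers.
class FixedRewardEnv:
    def __init__(self, horizon: int = 5):
        self.horizon = horizon
        self.t = 0

    def reset(self):
        self.t = 0
        return 0.0

    def step(self, action):
        self.t += 1
        # old-style gym API: (observation, reward, done, info)
        return float(self.t), 1.0, self.t >= self.horizon, {}

env = FixedRewardEnv()
obs, total_reward, done = env.reset(), 0.0, False
while not done:
    obs, reward, done, _ = env.step(action=0)
    total_reward += reward
assert total_reward == 5.0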
'''simple docstring'''
import argparse
A__ : Optional[Any] = """docs/source/_static/js/custom.js"""
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] ) -> int:
with open(UpperCAmelCase_ , encoding='utf-8' , newline='\n' ) as f:
__lowerCamelCase : Dict = f.readlines()
__lowerCamelCase : Tuple = 0
# First let's put the right version
while not lines[index].startswith('const stableVersion =' ):
index += 1
__lowerCamelCase : Dict = F'const stableVersion = "v{version}"\n'
# Then update the dictionary
while not lines[index].startswith('const versionMapping = {' ):
index += 1
# We go until the end
while not lines[index].startswith('}' ):
index += 1
# We add the new version at the end
lines[index - 1] += F' "v{version}": "v{version}",\n'
with open(UpperCAmelCase_ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(UpperCAmelCase_ )
if __name__ == "__main__":
A__ : str = argparse.ArgumentParser()
parser.add_argument("""--version""", help="""Release version.""")
A__ : Any = parser.parse_args()
update_custom_js(args.version)
| 13 | 0 |
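# Added sketch: a pure-string variant of update_custom_js above, easier to
# unit-test because it touches no files. It mirrors the same scan-and-insert
# logic; the sample input below is a made-up miniature of custom.js.
def add_version(js_source: str, version: str) -> str:
    lines = js_source.splitlines(keepends=True)
    index = 0
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    while not lines[index].startswith("}"):
        index += 1
    lines[index - 1] += f'    "v{version}": "v{version}",\n'
    return "".join(lines)

sample = 'const stableVersion = "v1.0"\nconst versionMapping = {\n    "v1.0": "v1.0",\n}\n'
assert '"v1.1": "v1.1"' in add_version(sample, "1.1")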
from __future__ import annotations
from collections.abc import Iterator
class snake_case__ :
def __init__( self : List[str] , _lowerCamelCase : Tuple ):
snake_case__ : Tuple = value
snake_case__ : Node | None = None
snake_case__ : Node | None = None
class snake_case__ :
def __init__( self : List[Any] , _lowerCamelCase : List[str] ):
snake_case__ : List[str] = tree
def UpperCAmelCase__ ( self : Dict , _lowerCamelCase : Optional[Any] ):
if node is None:
return 0
return node.value + (
self.depth_first_search(node.left ) + self.depth_first_search(node.right )
)
def __iter__( self : Tuple ):
yield self.depth_first_search(self.tree )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 170 |
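# Added sketch: a de-obfuscated miniature of the tree traversal above, a
# recursive depth-first sum over a binary tree with a four-node sanity check.
class Node:
    def __init__(self, value: int) -> None:
        self.value = value
        self.left: "Node | None" = None
        self.right: "Node | None" = None

def tree_sum(node: "Node | None") -> int:
    if node is None:
        return 0
    return node.value + tree_sum(node.left) + tree_sum(node.right)

root = Node(1)
root.left, root.right = Node(2), Node(3)
root.left.left = Node(4)
assert tree_sum(root) == 10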
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : Optional[Any] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE_ ) -> Any:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Optional[int] = hidden_states.shape
__lowerCamelCase : Dict = jax.image.resize(
SCREAMING_SNAKE_CASE_ , shape=(batch, height * 2, width * 2, channels) , method='nearest' , )
__lowerCamelCase : Optional[Any] = self.conv(SCREAMING_SNAKE_CASE_ )
return hidden_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : str = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE_ ) -> List[str]:
# pad = ((0, 0), (0, 1), (0, 1), (0, 0)) # pad height and width dim
# hidden_states = jnp.pad(hidden_states, pad_width=pad)
__lowerCamelCase : str = self.conv(SCREAMING_SNAKE_CASE_ )
return hidden_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : int = None
lowerCamelCase : float = 0.0
lowerCamelCase : bool = None
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase : Optional[Any] = self.in_channels if self.out_channels is None else self.out_channels
__lowerCamelCase : Optional[Any] = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__lowerCamelCase : Tuple = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__lowerCamelCase : List[str] = nn.Dense(SCREAMING_SNAKE_CASE_ , dtype=self.dtype )
__lowerCamelCase : Dict = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__lowerCamelCase : int = nn.Dropout(self.dropout_prob )
__lowerCamelCase : Union[str, Any] = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__lowerCamelCase : Optional[int] = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__lowerCamelCase : List[Any] = None
if use_nin_shortcut:
__lowerCamelCase : Any = nn.Conv(
SCREAMING_SNAKE_CASE_ , kernel_size=(1, 1) , strides=(1, 1) , padding='VALID' , dtype=self.dtype , )
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Tuple:
__lowerCamelCase : List[Any] = hidden_states
__lowerCamelCase : str = self.norma(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = nn.swish(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = self.conva(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = self.time_emb_proj(nn.swish(SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase : List[str] = jnp.expand_dims(jnp.expand_dims(SCREAMING_SNAKE_CASE_ , 1 ) , 1 )
__lowerCamelCase : Optional[int] = hidden_states + temb
__lowerCamelCase : List[Any] = self.norma(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Any = nn.swish(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = self.dropout(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = self.conva(SCREAMING_SNAKE_CASE_ )
if self.conv_shortcut is not None:
__lowerCamelCase : List[str] = self.conv_shortcut(SCREAMING_SNAKE_CASE_ )
return hidden_states + residual
| 13 | 0 |
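# Added sketch of the spatial bookkeeping in the Flax blocks above (NHWC):
# nearest-neighbour resize doubles H and W before the stride-1 3x3 conv, and
# the stride-2 3x3 conv with padding 1 halves even spatial dims. Both convs
# map the channel dim to the block's out_channels.
def upsample_shape(batch: int, height: int, width: int, out_channels: int) -> tuple:
    return (batch, height * 2, width * 2, out_channels)

def downsample_shape(batch: int, height: int, width: int, out_channels: int) -> tuple:
    return (batch, (height + 2 - 3) // 2 + 1, (width + 2 - 3) // 2 + 1, out_channels)

assert upsample_shape(1, 16, 16, 4) == (1, 32, 32, 4)
assert downsample_shape(1, 32, 32, 4) == (1, 16, 16, 4)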
'''simple docstring'''
from __future__ import annotations
import string
from itertools import cycle, product
from pathlib import Path
__snake_case = (
string.ascii_letters + string.digits + string.punctuation + string.whitespace
)
__snake_case = [ord(letter) for letter in string.ascii_lowercase]
__snake_case = {ord(char) for char in VALID_CHARS}
__snake_case = ["the", "be", "to", "of", "and", "in", "that", "have"]
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->str | None:
lowercase_ = ""
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
for keychar, cipherchar in zip(cycle(UpperCAmelCase_ ) , UpperCAmelCase_ ):
lowercase_ = cipherchar ^ keychar
if decodedchar not in VALID_INTS:
return None
decoded += chr(UpperCAmelCase_ )
return decoded
def A_ ( SCREAMING_SNAKE_CASE_ ) ->list[str]:
lowercase_ = []
for key in product(UpperCAmelCase_ , repeat=3 ):
lowercase_ = try_key(UpperCAmelCase_ , UpperCAmelCase_ )
if encoded is not None:
possibles.append(UpperCAmelCase_ )
return possibles
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->list[str]:
return [possible for possible in possibles if common_word in possible.lower()]
def A_ ( SCREAMING_SNAKE_CASE_ = "p059_cipher.txt" ) ->int:
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
lowercase_ = Path(UpperCAmelCase_ ).parent.joinpath(UpperCAmelCase_ ).read_text(encoding="""utf-8""" )
lowercase_ = [int(UpperCAmelCase_ ) for number in data.strip().split(""",""" )]
lowercase_ = filter_valid_chars(UpperCAmelCase_ )
for common_word in COMMON_WORDS:
lowercase_ = filter_common_word(UpperCAmelCase_ , UpperCAmelCase_ )
if len(UpperCAmelCase_ ) == 1:
break
lowercase_ = possibles[0]
return sum(ord(UpperCAmelCase_ ) for char in decoded_text )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 451 |
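# Added sketch of the core property the cipher-cracking code above exploits:
# XOR with a repeating key is its own inverse, so decrypting a message is
# just re-encrypting it with the same key.
from itertools import cycle

def xor_cipher(data: bytes, key: bytes) -> bytes:
    return bytes(byte ^ key_byte for byte, key_byte in zip(data, cycle(key)))

plaintext = b"attack at dawn"
ciphertext = xor_cipher(plaintext, b"god")
assert xor_cipher(ciphertext, b"god") == plaintext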
'''simple docstring'''
from __future__ import annotations
A__ : int = 10
def UpperCAmelCase__ ( UpperCAmelCase_ : list[int] ) -> list[int]:
__lowerCamelCase : List[Any] = 1
__lowerCamelCase : Any = max(UpperCAmelCase_ )
while placement <= max_digit:
# declare and initialize empty buckets
__lowerCamelCase : list[list] = [[] for _ in range(UpperCAmelCase_ )]
# split list_of_ints between the buckets
for i in list_of_ints:
__lowerCamelCase : List[Any] = int((i / placement) % RADIX )
buckets[tmp].append(UpperCAmelCase_ )
        # put each bucket's contents back into list_of_ints
__lowerCamelCase : Tuple = 0
for b in range(UpperCAmelCase_ ):
for i in buckets[b]:
__lowerCamelCase : List[Any] = i
a += 1
        # move to the next digit place
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 0 |
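# Added sketch: a compact restatement of the LSD radix sort above (base 10,
# non-negative integers), verified against Python's built-in sort.
import random

def radix_sort(values: list) -> list:
    placement, radix = 1, 10
    while placement <= max(values):
        buckets = [[] for _ in range(radix)]
        for value in values:
            buckets[(value // placement) % radix].append(value)
        values = [value for bucket in buckets for value in bucket]
        placement *= radix
    return values

data = [random.randrange(10_000) for _ in range(100)]
assert radix_sort(data) == sorted(data)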
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _SCREAMING_SNAKE_CASE :
@staticmethod
def _UpperCAmelCase ( *snake_case_ : int , **snake_case_ : Any ):
"""simple docstring"""
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
lowerCamelCase_ = MODEL_FOR_OBJECT_DETECTION_MAPPING
def _UpperCAmelCase ( self : Any , snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : Any ):
"""simple docstring"""
A : Dict = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def _UpperCAmelCase ( self : int , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] ):
"""simple docstring"""
A : Optional[int] = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
self.assertGreater(len(SCREAMING_SNAKE_CASE_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
SCREAMING_SNAKE_CASE_ , {
'''score''': ANY(SCREAMING_SNAKE_CASE_ ),
'''label''': ANY(SCREAMING_SNAKE_CASE_ ),
'''box''': {'''xmin''': ANY(SCREAMING_SNAKE_CASE_ ), '''ymin''': ANY(SCREAMING_SNAKE_CASE_ ), '''xmax''': ANY(SCREAMING_SNAKE_CASE_ ), '''ymax''': ANY(SCREAMING_SNAKE_CASE_ )},
} , )
import datasets
A : Any = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
A : str = [
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
A : Any = object_detector(SCREAMING_SNAKE_CASE_ , threshold=0.0 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
for outputs in batch_outputs:
self.assertGreater(len(SCREAMING_SNAKE_CASE_ ) , 0 )
for detected_object in outputs:
self.assertEqual(
SCREAMING_SNAKE_CASE_ , {
'''score''': ANY(SCREAMING_SNAKE_CASE_ ),
'''label''': ANY(SCREAMING_SNAKE_CASE_ ),
'''box''': {'''xmin''': ANY(SCREAMING_SNAKE_CASE_ ), '''ymin''': ANY(SCREAMING_SNAKE_CASE_ ), '''xmax''': ANY(SCREAMING_SNAKE_CASE_ ), '''ymax''': ANY(SCREAMING_SNAKE_CASE_ )},
} , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def _UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
pass
@require_torch
def _UpperCAmelCase ( self : Dict ):
"""simple docstring"""
A : Union[str, Any] = 'hf-internal-testing/tiny-detr-mobilenetsv3'
A : Optional[Any] = AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE_ )
A : Optional[int] = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
A : Any = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ )
A : Optional[int] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
] , )
A : Tuple = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
[
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 159, '''ymin''': 120, '''xmax''': 480, '''ymax''': 359}},
],
] , )
@require_torch
@slow
def _UpperCAmelCase ( self : Any ):
"""simple docstring"""
A : int = 'facebook/detr-resnet-50'
A : Dict = AutoModelForObjectDetection.from_pretrained(SCREAMING_SNAKE_CASE_ )
A : List[Any] = AutoFeatureExtractor.from_pretrained(SCREAMING_SNAKE_CASE_ )
A : Optional[int] = ObjectDetectionPipeline(model=SCREAMING_SNAKE_CASE_ , feature_extractor=SCREAMING_SNAKE_CASE_ )
A : Tuple = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
A : Dict = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
def _UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
A : Optional[int] = 'facebook/detr-resnet-50'
A : str = pipeline('''object-detection''' , model=SCREAMING_SNAKE_CASE_ )
A : int = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
A : str = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
[
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
[
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 175, '''ymax''': 117}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 333, '''ymin''': 72, '''xmax''': 368, '''ymax''': 187}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 639, '''ymax''': 473}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
],
] , )
@require_torch
@slow
def _UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
A : List[Any] = 0.99_85
A : Optional[int] = 'facebook/detr-resnet-50'
A : int = pipeline('''object-detection''' , model=SCREAMING_SNAKE_CASE_ )
A : List[str] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=SCREAMING_SNAKE_CASE_ )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 314, '''ymax''': 470}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 345, '''ymin''': 23, '''xmax''': 640, '''ymax''': 368}},
] , )
@require_torch
@require_pytesseract
@slow
def _UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
A : Optional[Any] = 'Narsil/layoutlmv3-finetuned-funsd'
A : Dict = 0.99_93
A : int = pipeline('''object-detection''' , model=SCREAMING_SNAKE_CASE_ , threshold=SCREAMING_SNAKE_CASE_ )
A : int = object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4 ) , [
{'''score''': 0.99_93, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
{'''score''': 0.99_93, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 294, '''ymin''': 254, '''xmax''': 343, '''ymax''': 264}},
        ] , )
| 256 |
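# Added sketch of the post-processing rule the threshold tests above depend
# on: every box whose confidence falls below `threshold` is dropped from the
# pipeline output. The helper below is ours, for illustration.
def filter_by_threshold(detections: list, threshold: float) -> list:
    return [d for d in detections if d["score"] >= threshold]

detections = [{"score": 0.9988, "label": "cat"}, {"score": 0.9960, "label": "remote"}]
assert filter_by_threshold(detections, 0.9985) == [{"score": 0.9988, "label": "cat"}]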
'''simple docstring'''
from collections import defaultdict
from math import gcd
def solution(limit: int = 1_500_000) -> int:
    """Project Euler 75: count perimeters below `limit` that admit exactly one
    integer-sided right triangle, using Euclid's parametrisation."""
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(f'''{solution() = }''')
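    # Hedged sanity check of the Euclid parametrisation used above: coprime
    # m > n of opposite parity give the primitive triple
    # (m*m - n*n, 2*m*n, m*m + n*n) with perimeter 2*m*(m + n).
    m, n = 2, 1
    a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
    assert (a, b, c) == (3, 4, 5) and a + b + c == 2 * m * (m + n)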
| 13 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
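# The try/except above is the standard optional-dependency guard. A minimal,
# hedged sketch of the same pattern with invented names (kept as a comment so
# nothing runs at import time):
#
#   try:
#       import torch  # noqa: F401
#       _torch_available = True
#   except ImportError:
#       _torch_available = False
#
#   if _torch_available:
#       def load_pipeline():
#           return "real torch-backed pipeline"
#   else:
#       def load_pipeline():
#           raise ImportError("Install `torch` and `transformers` to use this pipeline.")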
| 77 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt"""
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""junnyu/roformer_chinese_small""": 1536,
"""junnyu/roformer_chinese_base""": 1536,
"""junnyu/roformer_chinese_char_small""": 512,
"""junnyu/roformer_chinese_char_base""": 512,
"""junnyu/roformer_small_discriminator""": 128,
"""junnyu/roformer_small_generator""": 128,
}
PRETRAINED_INIT_CONFIGURATION = {
"""junnyu/roformer_chinese_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_base""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_small""": {"""do_lower_case""": True},
"""junnyu/roformer_chinese_char_base""": {"""do_lower_case""": True},
"""junnyu/roformer_small_discriminator""": {"""do_lower_case""": True},
"""junnyu/roformer_small_generator""": {"""do_lower_case""": True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    """Fast RoFormer tokenizer backed by HuggingFace tokenizers, with a custom Jieba pre-tokenizer."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            pre_tok_state.get("lowercase", do_lower_case) != do_lower_case
            or pre_tok_state.get("strip_accents", strip_accents) != strip_accents
        ):
            pre_tok_class = getattr(normalizers, pre_tok_state.pop("type"))
            pre_tok_state["lowercase"] = do_lower_case
            pre_tok_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = pre_tok_class(**pre_tok_state)

        # Make sure the custom Jieba pre-tokenizer is set already in __init__.
        vocab = self.backend_tokenizer.get_vocab()
        self.backend_tokenizer.pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        # The custom Jieba pre-tokenizer cannot be pickled; swap in a picklable one.
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        # Saving needs a picklable pre-tokenizer in place of the Jieba one.
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
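# Hedged usage sketch for the class above (kept as a comment so the module has
# no import-time side effects): loading from the Hub needs network access and
# the `rjieba` package for the custom pre-tokenizer.
#
#   tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
#   ids = tokenizer("今天天气非常好。")["input_ids"]
#   print(tokenizer.decode(ids))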
| 13 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
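    # Hypothetical command line for the script above; the script filename and
    # every path are placeholders, not real files:
    #
    #   python convert_unispeech_sat_s3prl_checkpoint.py \
    #       --base_model_name microsoft/unispeech-sat-base \
    #       --config_path ./config.json \
    #       --checkpoint_path ./s3prl_checkpoint.ckpt \
    #       --model_dump_path ./converted_model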
| 452 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mt5"] = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mt5"] = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
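# Minimal sketch (with invented names) of the lazy-import idea used above:
# attribute access triggers the submodule import instead of paying for it at
# package load. Kept as a comment so it does not run inside this __init__.
#
#   import importlib, types
#
#   class _LazyDemoModule(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   module = importlib.import_module(f"{self.__name__}.{submodule}")
#                   return getattr(module, attr)
#           raise AttributeError(attr)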
| 13 | 0 |
"""simple docstring"""
from collections.abc import Sequence
def max_subarray_sum(arr: Sequence[float], allow_empty_subarrays: bool = False) -> float:
    """Kadane's algorithm: largest sum of a contiguous subarray in O(n)."""
    if not arr:
        return 0

    max_sum = 0 if allow_empty_subarrays else float("-inf")
    curr_sum = 0.0
    for num in arr:
        curr_sum = max(0 if allow_empty_subarrays else num, curr_sum + num)
        max_sum = max(max_sum, curr_sum)

    return max_sum
if __name__ == "__main__":
from doctest import testmod
testmod()
    nums = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(f'{max_subarray_sum(nums) = }')
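    # Quick demonstration of the `allow_empty_subarrays` flag: on an
    # all-negative input it turns the answer from the largest single element
    # into 0 (the empty subarray).
    assert max_subarray_sum([-3, -1, -2]) == -1
    assert max_subarray_sum([-3, -1, -2], allow_empty_subarrays=True) == 0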
| 104 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
    """Applies a warmup schedule on top of a given learning-rate decay schedule."""

    def __init__(self, initial_learning_rate, decay_schedule_fn, warmup_steps, power=1.0, name=None):
        super().__init__()
        self.initial_learning_rate = initial_learning_rate
        self.warmup_steps = warmup_steps
        self.power = power
        self.decay_schedule_fn = decay_schedule_fn
        self.name = name

    def __call__(self, step):
        with tf.name_scope(self.name or "WarmUp") as name:
            # Implements polynomial warmup. i.e., if global_step < warmup_steps, the
            # learning rate will be `global_step/num_warmup_steps * init_lr`.
            global_step_float = tf.cast(step, tf.float32)
            warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
            warmup_percent_done = global_step_float / warmup_steps_float
            warmup_learning_rate = self.initial_learning_rate * tf.math.pow(warmup_percent_done, self.power)
            return tf.cond(
                global_step_float < warmup_steps_float,
                lambda: warmup_learning_rate,
                lambda: self.decay_schedule_fn(step - self.warmup_steps),
                name=name,
            )

    def get_config(self):
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "decay_schedule_fn": self.decay_schedule_fn,
            "warmup_steps": self.warmup_steps,
            "power": self.power,
            "name": self.name,
        }
def create_optimizer(init_lr: float, num_train_steps: int, num_warmup_steps: int, min_lr_ratio: float = 0.0, adam_beta1: float = 0.9, adam_beta2: float = 0.999, adam_epsilon: float = 1e-8, adam_clipnorm: Optional[float] = None, adam_global_clipnorm: Optional[float] = None, weight_decay_rate: float = 0.0, power: float = 1.0, include_in_weight_decay: Optional[List[str]] = None):
    """Creates an optimizer with a learning-rate schedule: linear-ish warmup followed by polynomial decay."""
    lr_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr, decay_steps=num_train_steps - num_warmup_steps, end_learning_rate=init_lr * min_lr_ratio, power=power
    )
    if num_warmup_steps:
        lr_schedule = WarmUp(initial_learning_rate=init_lr, decay_schedule_fn=lr_schedule, warmup_steps=num_warmup_steps)
    if weight_decay_rate > 0.0:
        optimizer = AdamWeightDecay(
            learning_rate=lr_schedule, weight_decay_rate=weight_decay_rate, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm, exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"], include_in_weight_decay=include_in_weight_decay
        )
    else:
        optimizer = tf.keras.optimizers.Adam(
            learning_rate=lr_schedule, beta_1=adam_beta1, beta_2=adam_beta2, epsilon=adam_epsilon, clipnorm=adam_clipnorm, global_clipnorm=adam_global_clipnorm
        )
    # We return the optimizer and the LR scheduler in order to better track the
    # evolution of the LR independently of the optimizer.
    return optimizer, lr_schedule
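# Hedged usage sketch (kept as a comment so nothing runs at import time): a
# schedule with 10% linear warmup followed by polynomial decay to zero.
#
#   optimizer, lr_schedule = create_optimizer(
#       init_lr=5e-5, num_train_steps=1000, num_warmup_steps=100, weight_decay_rate=0.01
#   )
#   float(lr_schedule(50))    # mid-warmup -> 2.5e-5, half of init_lr
#   float(lr_schedule(1000))  # fully decayed -> 0.0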
class AdamWeightDecay(Adam):
    """Adam with decoupled weight decay that is applied directly to the variables, not through the gradients."""

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        amsgrad=False,
        weight_decay_rate=0.0,
        include_in_weight_decay=None,
        exclude_from_weight_decay=None,
        name="AdamWeightDecay",
        **kwargs,
    ):
        super().__init__(learning_rate, beta_1, beta_2, epsilon, amsgrad, name, **kwargs)
        self.weight_decay_rate = weight_decay_rate
        self._include_in_weight_decay = include_in_weight_decay
        self._exclude_from_weight_decay = exclude_from_weight_decay

    @classmethod
    def from_config(cls, config):
        """Creates an optimizer from its config with WarmUp custom object."""
        custom_objects = {"WarmUp": WarmUp}
        return super(AdamWeightDecay, cls).from_config(config, custom_objects=custom_objects)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        super(AdamWeightDecay, self)._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)]["weight_decay_rate"] = tf.constant(
            self.weight_decay_rate, name="adam_weight_decay_rate"
        )

    def _decay_weights_op(self, var, learning_rate, apply_state):
        do_decay = self._do_use_weight_decay(var.name)
        if do_decay:
            return var.assign_sub(
                learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["weight_decay_rate"],
                use_locking=self._use_locking,
            )
        return tf.no_op()

    def apply_gradients(self, grads_and_vars, name=None, **kwargs):
        grads, tvars = list(zip(*grads_and_vars))
        return super(AdamWeightDecay, self).apply_gradients(zip(grads, tvars), name=name, **kwargs)

    def _get_lr(self, var_device, var_dtype, apply_state):
        """Retrieves the learning rate with the given state."""
        if apply_state is None:
            return self._decayed_lr_t[var_dtype], {}

        apply_state = apply_state or {}
        coefficients = apply_state.get((var_device, var_dtype))
        if coefficients is None:
            coefficients = self._fallback_apply_state(var_device, var_dtype)
            apply_state[(var_device, var_dtype)] = coefficients

        return coefficients["lr_t"], {"apply_state": apply_state}

    def _resource_apply_dense(self, grad, var, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_dense(grad, var, **kwargs)

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        lr_t, kwargs = self._get_lr(var.device, var.dtype.base_dtype, apply_state)
        decay = self._decay_weights_op(var, lr_t, apply_state)
        with tf.control_dependencies([decay]):
            return super(AdamWeightDecay, self)._resource_apply_sparse(grad, var, indices, **kwargs)

    def get_config(self):
        config = super().get_config()
        config.update({"weight_decay_rate": self.weight_decay_rate})
        return config

    def _do_use_weight_decay(self, param_name):
        """Whether to use L2 weight decay for `param_name`."""
        if self.weight_decay_rate == 0:
            return False
        if self._include_in_weight_decay:
            for r in self._include_in_weight_decay:
                if re.search(r, param_name) is not None:
                    return True
        if self._exclude_from_weight_decay:
            for r in self._exclude_from_weight_decay:
                if re.search(r, param_name) is not None:
                    return False
        return True
class GradientAccumulator:
    """Accumulates gradients over multiple steps, e.g. to simulate a larger batch size."""

    def __init__(self):
        self._gradients = []
        self._accum_steps = None

    @property
    def step(self):
        """Number of accumulated steps."""
        if self._accum_steps is None:
            self._accum_steps = tf.Variable(
                tf.constant(0, dtype=tf.int64),
                trainable=False,
                synchronization=tf.VariableSynchronization.ON_READ,
                aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
            )
        return self._accum_steps.value()

    @property
    def gradients(self):
        """The accumulated gradients on the current replica."""
        if not self._gradients:
            raise ValueError("The accumulator should be called first to initialize the gradients")
        return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]

    def __call__(self, gradients):
        """Accumulates `gradients` on the current replica."""
        if not self._gradients:
            _ = self.step  # Create the step variable.
            self._gradients.extend(
                [
                    tf.Variable(
                        tf.zeros_like(gradient),
                        trainable=False,
                        synchronization=tf.VariableSynchronization.ON_READ,
                        aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA,
                    )
                    if gradient is not None
                    else gradient
                    for gradient in gradients
                ]
            )
        if len(gradients) != len(self._gradients):
            raise ValueError(f"Expected {len(self._gradients)} gradients, but got {len(gradients)}")
        for accum_gradient, gradient in zip(self._gradients, gradients):
            if accum_gradient is not None and gradient is not None:
                accum_gradient.assign_add(gradient)
        self._accum_steps.assign_add(1)

    def reset(self):
        """Resets the accumulated gradients on the current replica."""
        if not self._gradients:
            return
        self._accum_steps.assign(0)
        for gradient in self._gradients:
            if gradient is not None:
                gradient.assign(tf.zeros_like(gradient))
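# Hedged usage sketch (kept as a comment; `model`, `loss_fn`, `dataset` and
# `optimizer` are placeholders): accumulate per-batch gradients and apply them
# every `accum_steps` batches.
#
#   accumulator = GradientAccumulator()
#   accum_steps = 4
#   for step, (x, y) in enumerate(dataset):
#       with tf.GradientTape() as tape:
#           loss = loss_fn(model(x), y)
#       accumulator(tape.gradient(loss, model.trainable_variables))
#       if (step + 1) % accum_steps == 0:
#           optimizer.apply_gradients(zip(accumulator.gradients, model.trainable_variables))
#           accumulator.reset()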
| 13 | 0 |
import math
def is_prime(number: int) -> bool:
    """Deterministic trial division using the 6k +/- 1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    """Project Euler 7: return the nth prime number."""
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[len(primes) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
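    # Sanity checks for the helpers above: 13 is prime, 15 is not, and the
    # sixth prime (2, 3, 5, 7, 11, 13) is 13.
    assert is_prime(13) and not is_prime(15)
    assert solution(6) == 13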
| 181 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    """Builds tiny MaskFormerSwin configs and inputs for the model tests."""
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, out_features=self.out_features, out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for MaskFormerSwin (model + backbone)."""

    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def lowercase_ ( self ) -> int:
pass
def lowercase_ ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowercase_ ( self ) -> Tuple:
return
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> List[str]:
__lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*SCREAMING_SNAKE_CASE_ )
@unittest.skip('Swin does not use inputs_embeds' )
def lowercase_ ( self ) -> Optional[int]:
pass
@unittest.skip('Swin does not support feedforward chunking' )
def lowercase_ ( self ) -> Dict:
pass
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCamelCase : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) )
def lowercase_ ( self ) -> Optional[int]:
__lowerCamelCase , __lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCamelCase : List[str] = model_class(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCamelCase : str = [*signature.parameters.keys()]
__lowerCamelCase : Any = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def lowercase_ ( self ) -> List[Any]:
pass
def lowercase_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
__lowerCamelCase : Tuple = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
__lowerCamelCase : int = outputs.hidden_states
__lowerCamelCase : Tuple = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
# Swin has a different seq_length
__lowerCamelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase : List[Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase , __lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowerCamelCase : Dict = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Optional[int] = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_ ( self ) -> Any:
__lowerCamelCase , __lowerCamelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Union[str, Any] = 3
__lowerCamelCase : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCamelCase : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCamelCase : Optional[int] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCamelCase : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowerCamelCase : str = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCamelCase : Tuple = True
self.check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def lowercase_ ( self ) -> Optional[Any]:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Any:
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def lowercase_ ( self ) -> Union[str, Any]:
pass
def lowercase_ ( self ) -> Tuple:
__lowerCamelCase , __lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ):
__lowerCamelCase : Any = 0
return t
def check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_={} ):
with torch.no_grad():
__lowerCamelCase : Optional[int] = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = model(**SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).to_tuple()
def recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if isinstance(SCREAMING_SNAKE_CASE_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , set_nan_tensor_to_zero(SCREAMING_SNAKE_CASE_ ) , atol=1E-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
f' {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}. Dict has'
f' `nan`: {torch.isnan(SCREAMING_SNAKE_CASE_ ).any()} and `inf`: {torch.isinf(SCREAMING_SNAKE_CASE_ )}.'
) , )
recursive_check(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for model_class in self.all_model_classes:
__lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
__lowerCamelCase : Optional[Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : int = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : str = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
__lowerCamelCase : Any = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
check_equivalence(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , {'output_hidden_states': True} )
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    """Backbone-specific tests for MaskFormerSwin."""

    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
def lowercase_ ( self ) -> Optional[Any]:
__lowerCamelCase , __lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCamelCase : Any = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
__lowerCamelCase : Optional[Any] = backbone_class(SCREAMING_SNAKE_CASE_ )
backbone.to(SCREAMING_SNAKE_CASE_ )
backbone.eval()
__lowerCamelCase : int = backbone(**SCREAMING_SNAKE_CASE_ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , SCREAMING_SNAKE_CASE_ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowerCamelCase : Union[str, Any] = backbone(**SCREAMING_SNAKE_CASE_ , output_hidden_states=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : str = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowerCamelCase : Optional[int] = backbone(**SCREAMING_SNAKE_CASE_ , output_attentions=SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(outputs.attentions )
| 13 | 0 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Expects two lists of numbers and returns the Manhattan (L1) distance between them."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list[float]) -> None:
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same as `manhattan_distance`, written as a single generator expression."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")

    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
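    # Quick usage checks: both variants agree and return plain floats.
    assert manhattan_distance([1, 1], [2, 2]) == 2.0
    assert manhattan_distance_one_liner([1, 5], [7, 4]) == 7.0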
| 291 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg: str, hint: str = None) -> None:
    require_version(deps[pkg], hint)
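# Hedged usage sketch (kept as a comment): `dep_version_check("tqdm")` re-checks
# the pinned tqdm requirement from the dependency table and raises a clear
# error — plus the optional hint — if the installed version does not satisfy it.
#
#   dep_version_check("tqdm", hint="Try `pip install -U tqdm`.")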
| 13 | 0 |
import re
def dna(dna: str) -> str:
    """Returns the complementary DNA strand; raises ValueError on invalid bases."""
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))
if __name__ == "__main__":
import doctest
doctest.testmod()
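    # Complement pairs are A<->T and C<->G, so e.g. "GCTA" maps to "CGAT".
    assert dna("GCTA") == "CGAT"
    assert dna("ATCG") == "TAGC"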
| 191 |
'''simple docstring'''
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
A__ : List[str] = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None)
| 13 | 0 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaVaModelTester:
    """Builds tiny DeBERTa-v2 configs and inputs for the model tests."""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaVaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_deberta_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaVaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for DeBERTa-v2."""

    all_model_classes = (
        (
            DebertaVaModel,
            DebertaVaForMaskedLM,
            DebertaVaForSequenceClassification,
            DebertaVaForTokenClassification,
            DebertaVaForQuestionAnswering,
            DebertaVaForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaVaModel,
            "fill-mask": DebertaVaForMaskedLM,
            "question-answering": DebertaVaForQuestionAnswering,
            "text-classification": DebertaVaForSequenceClassification,
            "token-classification": DebertaVaForTokenClassification,
            "zero-shot": DebertaVaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaVaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_deberta_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_deberta_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_deberta_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_deberta_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    def test_deberta_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaVaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        """simple docstring"""
        pass

    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = DebertaVaModel.from_pretrained("microsoft/deberta-v2-xlarge")
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), F'''{output[:, 1:4, 1:4]}''')
| 283 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
A__ : Tuple = namedtuple("""covid_data""", """cases deaths recovered""")
def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = """Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}"""
print(fmt.format(*covid_stats()))
| 13 | 0 |
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """simple docstring"""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
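

# Quick sanity check (illustrative addition, not part of the original script):
# the origin never diverges, so the loop runs to completion and the normalised
# step count comes out as exactly 1.
assert get_distance(0, 0, 50) == 1.0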
def get_black_and_white_rgb(distance: float) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """simple docstring"""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 14 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=30522,
        block_sizes=[4, 4, 4],
        block_repeats=None,
        num_decoder_layers=2,
        d_model=768,
        n_head=12,
        d_head=64,
        d_inner=3072,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        initializer_range=0.1,
        initializer_std=None,
        layer_norm_eps=1e-9,
        pooling_type="mean",
        attention_type="relative_shift",
        separate_cls=True,
        truncate_seq=True,
        pool_q_only=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], F"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], F"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only
        super().__init__(**kwargs)
    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
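

# A minimal usage sketch (illustrative, not part of the original module): with
# the default ``block_sizes=[4, 4, 4]`` the derived ``num_hidden_layers``
# property is the sum of the block sizes and ``num_blocks`` their count.
#
#     config = FunnelConfig()
#     assert config.num_hidden_layers == sum(config.block_sizes)  # 12
#     assert config.num_blocks == len(config.block_sizes)         # 3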
| 14 | 1 |
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    """simple docstring"""
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    """simple docstring"""
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """simple docstring"""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )
    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    """simple docstring"""
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1
    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1
    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True
    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)
    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
| 14 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    """simple docstring"""

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
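

# Illustrative sketch (not in the original file): the ONNX export config
# declares a single dynamic batch axis for ``pixel_values``, and validation
# compares exported and eager outputs with an absolute tolerance of 1e-4.
#
#     onnx_config = MobileNetV1OnnxConfig(MobileNetV1Config())
#     assert "pixel_values" in onnx_config.inputs
#     assert onnx_config.atol_for_validation == 1e-4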
| 14 | 1 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
| 14 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """simple docstring"""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex
    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)
    return letter == LOOKUP_LETTERS[number % 23]
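

# Worked example (illustrative, not part of the original module): for the ID
# "12345678Z", 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so the
# checksum letter matches.
assert is_spain_national_id("12345678Z")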
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 1 |
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 14 |
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    """simple docstring"""
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp
        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    """simple docstring"""
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp
    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
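

# Small illustrative check (not part of the original script): quick sort with
# a random pivot still sorts deterministically, and the returned value counts
# the element comparisons made while partitioning.
_demo = [3, 1, 2]
assert _in_place_quick_sort(_demo, 0, len(_demo) - 1) >= 2 and _demo == [1, 2, 3]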
outfile = TemporaryFile()
p = 100  # 1000 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    "is :"
)
print(z)
| 14 | 1 |
def is_palindrome(head):
    """simple docstring"""
    if not head:
        return True
    # split the list to two parts
    fast, slow = head.next, head
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # Don't forget here! But forget still works!
    # reverse the second part
    node = None
    while second:
        nxt = second.next
        second.next = node
        node = second
        second = nxt
    # compare two parts
    # second part has the same or one less node
    while node:
        if node.val != head.val:
            return False
        node = node.next
        head = head.next
    return True


def is_palindrome_stack(head):
    """simple docstring"""
    if not head or not head.next:
        return True
    # 1. Get the midpoint (slow)
    slow = fast = cur = head
    while fast and fast.next:
        fast, slow = fast.next.next, slow.next
    # 2. Push the second half into the stack
    stack = [slow.val]
    while slow.next:
        slow = slow.next
        stack.append(slow.val)
    # 3. Comparison
    while stack:
        if stack.pop() != cur.val:
            return False
        cur = cur.next
    return True


def is_palindrome_dict(head):
    """simple docstring"""
    if not head or not head.next:
        return True
    d = {}
    pos = 0
    while head:
        if head.val in d:
            d[head.val].append(pos)
        else:
            d[head.val] = [pos]
        head = head.next
        pos += 1
    checksum = pos - 1
    middle = 0
    for v in d.values():
        if len(v) % 2 != 0:
            middle += 1
        else:
            step = 0
            for i in range(0, len(v)):
                if v[i] + v[len(v) - 1 - step] != checksum:
                    return False
                step += 1
        if middle > 1:
            return False
    return True
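

# The three checks above assume a singly linked node with ``val`` and ``next``
# attributes; the module itself never defines one. A minimal sketch of such a
# node plus a list builder (both illustrative additions):
class ListNode:
    def __init__(self, val):
        self.val = val
        self.next = None


def build_list(values):
    # build the list back-to-front so the head ends up in input order
    head = None
    for v in reversed(values):
        node = ListNode(v)
        node.next = head
        head = node
    return head


assert is_palindrome(build_list([1, 2, 2, 1]))
assert not is_palindrome_stack(build_list([1, 2, 3]))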
| 14 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False
def __lowercase ( self ) -> Any:
super().setUp()
# fmt: off
_a : Tuple = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
_a : Optional[int] = dict(zip(_a , range(len(_a ) ) ) )
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_a ) + '''\n''' )
def __lowercase ( self , **_a ) -> Dict:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def __lowercase ( self , _a ) -> Tuple:
_a : List[str] = '''tester'''
_a : Optional[Any] = '''tester'''
return input_text, output_text
@unittest.skip('''MGP-STR always lower cases letters.''' )
def __lowercase ( self ) -> Any:
pass
def __lowercase ( self ) -> Any:
_a : Union[str, Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a : int = '''[SPECIAL_TOKEN]'''
tokenizer.add_special_tokens({'''cls_token''': special_token} )
_a : Tuple = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_a : Tuple = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def __lowercase ( self ) -> Tuple:
_a : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}""" ):
_a , _a : int = self.get_input_output_texts(_a )
_a : List[str] = tokenizer.tokenize(_a )
_a : Optional[int] = tokenizer.convert_tokens_to_ids(_a )
_a : Tuple = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Optional[int] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
_a : int = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(''' ''' , '''''' ) , _a )
@unittest.skip('''MGP-STR tokenizer only handles one sequence.''' )
def __lowercase ( self ) -> List[str]:
pass
@unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' )
def __lowercase ( self ) -> Optional[Any]:
pass
| 14 | 1 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    """simple docstring"""

    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2
        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
def __lowercase ( self ) -> Optional[Any]:
_a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_a : Optional[int] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_a : Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
_a : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_a : Dict = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_a : Optional[int] = prepare_led_inputs_dict(_a , _a , _a )
_a : int = tf.concat(
[tf.zeros_like(_a )[:, :-1], tf.ones_like(_a )[:, -1:]] , axis=-1 , )
_a : str = global_attention_mask
return config, inputs_dict
def __lowercase ( self , _a , _a ) -> Any:
_a : Dict = TFLEDModel(config=_a ).get_decoder()
_a : Any = inputs_dict['''input_ids''']
_a : str = input_ids[:1, :]
_a : Optional[int] = inputs_dict['''attention_mask'''][:1, :]
_a : Optional[Any] = 1
# first forward pass
_a : int = model(_a , attention_mask=_a , use_cache=_a )
_a , _a : Dict = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_a : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
_a : int = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_a : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_a : Any = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_a : int = model(_a , attention_mask=_a )[0]
_a : Optional[Any] = model(_a , attention_mask=_a , past_key_values=_a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_a : str = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_a : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_a : Tuple = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_a , _a , rtol=1e-3 )
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
if head_mask is None:
_a : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_a : Optional[Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
UpperCAmelCase__ : Optional[int] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
UpperCAmelCase__ : List[Any] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
UpperCAmelCase__ : str = True
UpperCAmelCase__ : Dict = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Dict = False
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = TFLEDModelTester(self )
_a : Any = ConfigTester(self , config_class=_a )
def __lowercase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def __lowercase ( self ) -> int:
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_a )
def __lowercase ( self ) -> str:
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_a : int = tf.zeros_like(inputs_dict['''attention_mask'''] )
_a : Tuple = 2
_a : Optional[Any] = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
_a : Tuple = True
_a : Union[str, Any] = self.model_tester.seq_length
_a : Dict = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_a ):
_a : List[str] = outputs.decoder_attentions
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_a ):
_a : Optional[int] = [t.numpy() for t in outputs.encoder_attentions]
_a : Optional[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_a : List[Any] = True
_a : str = False
_a : Any = False
_a : List[Any] = model_class(_a )
_a : Union[str, Any] = model(self._prepare_for_class(_a , _a ) )
_a : Any = len(_a )
self.assertEqual(config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
if self.is_encoder_decoder:
_a : str = model_class(_a )
_a : Dict = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(config.output_hidden_states , _a )
check_decoder_attentions_output(_a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_a : Union[str, Any] = True
_a : Tuple = model_class(_a )
_a : Optional[Any] = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
# Check attention is always last and order is fine
_a : Tuple = True
_a : Dict = True
_a : Union[str, Any] = model_class(_a )
_a : Optional[Any] = model(self._prepare_for_class(_a , _a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_a ) )
self.assertEqual(model.config.output_hidden_states , _a )
check_encoder_attentions_output(_a )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
def __lowercase ( self ) -> List[str]:
# TODO: Head-masking not yet implement
pass
def _long_tensor(tok_lst):
    """simple docstring"""
    return tf.constant(tok_lst, dtype=tf.int32)
a__ = 1E-4
@slow
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
_a : Dict = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
_a : str = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
_a : Tuple = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
_a : Any = prepare_led_inputs_dict(model.config , _a , _a )
_a : Optional[int] = model(**_a )[0]
_a : str = (1, 1_0_2_4, 7_6_8)
self.assertEqual(output.shape , _a )
# change to expected output here
_a : str = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 )
def __lowercase ( self ) -> str:
_a : Any = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
_a : List[str] = _long_tensor([5_1_2 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
_a : Dict = _long_tensor([1_2_8 * [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9]] )
_a : Union[str, Any] = prepare_led_inputs_dict(model.config , _a , _a )
_a : Union[str, Any] = model(**_a )[0]
_a : Optional[int] = (1, 1_0_2_4, model.config.vocab_size)
self.assertEqual(output.shape , _a )
# change to expected output here
_a : List[str] = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , _a , atol=1e-3 , rtol=1e-3 )
| 14 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> List[Any]:
_a : int = 0
def __lowercase ( self ) -> List[str]:
_a : Dict = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : Optional[Any] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
_a : Any = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Dict = CLIPConfig()
# Create a dummy config file with image_proceesor_type
_a : Tuple = Path(_a ) / '''preprocessor_config.json'''
_a : List[str] = Path(_a ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
_a : Tuple = AutoImageProcessor.from_pretrained(_a ).to_dict()
config_dict.pop('''image_processor_type''' )
_a : Tuple = CLIPImageProcessor(**_a )
# save in new folder
model_config.save_pretrained(_a )
config.save_pretrained(_a )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
# make sure private variable is not incorrectly saved
_a : Optional[int] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Dict:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Optional[int] = Path(_a ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
_a : List[str] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
def __lowercase ( self ) -> Any:
with self.assertRaisesRegex(
_a , '''clip-base is not a local folder and is not a valid model identifier''' ):
_a : Dict = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowercase ( self ) -> List[Any]:
with self.assertRaisesRegex(
_a , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_a : List[str] = AutoImageProcessor.from_pretrained(_a , revision='''aaaaaa''' )
def __lowercase ( self ) -> Dict:
with self.assertRaisesRegex(
_a , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_a : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowercase ( self ) -> Union[str, Any]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_a ):
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_a ):
_a : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
_a : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a , trust_remote_code=_a )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def __lowercase ( self ) -> Dict:
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_a ):
AutoImageProcessor.register(_a , _a )
with tempfile.TemporaryDirectory() as tmpdirname:
_a : int = Path(_a ) / '''preprocessor_config.json'''
_a : int = Path(_a ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_a , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_a , '''w''' ) )
_a : int = CustomImageProcessor.from_pretrained(_a )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_a )
_a : Optional[Any] = AutoImageProcessor.from_pretrained(_a )
self.assertIsInstance(_a , _a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowercase ( self ) -> Union[str, Any]:
        class NewImageProcessor(CLIPImageProcessor):
            """simple docstring"""

            is_local = True
try:
AutoConfig.register('''custom''' , _a )
AutoImageProcessor.register(_a , _a )
# If remote code is not set, the default is to use local
_a : str = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
_a : int = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
_a : Dict = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_a )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_a , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 14 | 1 |
def fizz_buzz(number: int, iterations: int) -> str:
    """simple docstring"""
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out
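

# Worked example (illustrative): multiples of both 3 and 5 collect both words,
# so the fifteenth token of fizz_buzz(1, 15) is "FizzBuzz".
assert fizz_buzz(1, 15).split()[14] == "FizzBuzz"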
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    """simple docstring"""

    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None
def is_binary_search_tree(node: TreeNode | None) -> bool:
    """simple docstring"""

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(node):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(node, -float("inf"), float("inf"))
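

# Minimal usage sketch (illustrative): a two-level tree that satisfies the
# ordering invariant validates, while swapping the children breaks it.
assert is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))
assert not is_binary_search_tree(TreeNode(2.0, TreeNode(3.0), TreeNode(1.0)))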
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 1 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
a__ = numpy.array([0, 0])
a__ = numpy.array([0.5, 0.8660254])
a__ = numpy.array([1, 0])
a__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
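

# Illustrative check (not from the original file): each pass replaces every
# segment with four shorter ones, so a polyline with n points grows to
# 4 * n - 3 points ((n - 1) segments become 4 * (n - 1) segments).
assert len(iteration_step(INITIAL_VECTORS)) == 4 * len(INITIAL_VECTORS) - 3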
def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 14 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
a__ = numpy.array([0, 0])
a__ = numpy.array([0.5, 0.8660254])
a__ = numpy.array([1, 0])
a__ = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """simple docstring"""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """simple docstring"""
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60))
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """simple docstring"""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """simple docstring"""
    axes = plt.gca()
    axes.set_aspect("equal")
    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
| 14 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    """simple docstring"""

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    """simple docstring"""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 14 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
"""simple docstring"""
with open(__a ) as metadata_file:
_a : Optional[Any] = json.load(__a )
_a : List[Any] = LukeConfig(use_entity_aware_attention=__a ,**metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
_a : Optional[Any] = torch.load(__a ,map_location='''cpu''' )['''module''']
# Load the entity vocab file
_a : Any = load_original_entity_vocab(__a )
# add an entry for [MASK2]
_a : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_a : Dict = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
_a : Optional[int] = AddedToken('''<ent>''' ,lstrip=__a ,rstrip=__a )
_a : Tuple = AddedToken('''<ent2>''' ,lstrip=__a ,rstrip=__a )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(F"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(__a )
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''r''' ) as f:
_a : List[str] = json.load(__a )
_a : Tuple = '''MLukeTokenizer'''
with open(os.path.join(__a ,'''tokenizer_config.json''' ) ,'''w''' ) as f:
json.dump(__a ,__a )
with open(os.path.join(__a ,MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) ,'''w''' ) as f:
json.dump(__a ,__a )
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
# Initialize the embeddings of the special tokens
_a : str = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
_a : Tuple = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
_a : Any = state_dict['''embeddings.word_embeddings.weight''']
_a : Optional[int] = word_emb[ent_init_index].unsqueeze(0 )
_a : Any = word_emb[enta_init_index].unsqueeze(0 )
_a : Union[str, Any] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_a : Tuple = state_dict[bias_name]
_a : Optional[Any] = decoder_bias[ent_init_index].unsqueeze(0 )
_a : Optional[int] = decoder_bias[enta_init_index].unsqueeze(0 )
_a : Dict = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_a : Tuple = F"""encoder.layer.{layer_index}.attention.self."""
_a : List[Any] = state_dict[prefix + matrix_name]
_a : Dict = state_dict[prefix + matrix_name]
_a : List[Any] = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_a : Union[str, Any] = state_dict['''entity_embeddings.entity_embeddings.weight''']
_a : Optional[int] = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Any = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_a : int = state_dict['''entity_predictions.bias''']
_a : int = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
_a : Optional[Any] = torch.cat([entity_prediction_bias, entity_mask_bias] )
_a : Optional[int] = LukeForMaskedLM(config=__a ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
_a : int = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
_a : Optional[int] = state_dict[key]
else:
_a : Tuple = state_dict[key]
_a , _a : int = model.load_state_dict(__a ,strict=__a )
if set(__a ) != {"luke.embeddings.position_ids"}:
raise ValueError(F"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(__a ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(F"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a ,task='''entity_classification''' )
_a : int = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
_a : List[Any] = (0, 9)
_a : Tuple = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : int = model(**__a )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : List[str] = torch.Size((1, 33, 768) )
_a : Union[str, Any] = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_a : str = torch.Size((1, 1, 768) )
_a : List[Any] = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
F"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
F""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] ,__a ,atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
_a : Optional[int] = MLukeTokenizer.from_pretrained(__a )
_a : Dict = '''Tokyo is the capital of <mask>.'''
_a : List[str] = (24, 30)
_a : Optional[int] = tokenizer(__a ,entity_spans=[span] ,return_tensors='''pt''' )
_a : Optional[Any] = model(**__a )
_a : Any = encoding['''input_ids'''][0].tolist()
_a : Optional[Any] = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
_a : Any = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__a )
_a : Any = outputs.entity_logits[0][0].argmax().item()
_a : Optional[Any] = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(__a ) )
model.save_pretrained(__a )
def load_original_entity_vocab(entity_vocab_path):
    """simple docstring"""
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
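

# For reference (format inferred from the parser above, not from upstream
# documentation): each line of the entity vocab file is a JSON object such as
#
#     {"id": 4, "entities": [["Japan", "en"], ["[UNK]", "en"]]}
#
# which the helper flattens to {"en:Japan": 4}; special tokens keep their
# bare name, e.g. {"[UNK]": 4}.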
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
a__ = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 14 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''Salesforce/blip-vqa-base''': '''https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json''',
'''Salesforce/blip-vqa-capfit-large''': (
'''https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-base''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json'''
),
'''Salesforce/blip-image-captioning-large''': (
'''https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json'''
),
'''Salesforce/blip-itm-base-coco''': '''https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-large-coco''': '''https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json''',
'''Salesforce/blip-itm-base-flikr''': '''https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json''',
'''Salesforce/blip-itm-large-flikr''': (
'''https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json'''
),
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = "blip_text_model"
def __init__( self , _a=3_0_5_2_4 , _a=7_6_8 , _a=7_6_8 , _a=3_0_7_2 , _a=7_6_8 , _a=1_2 , _a=8 , _a=5_1_2 , _a="gelu" , _a=1e-1_2 , _a=0.0 , _a=0.0 , _a=0.02 , _a=3_0_5_2_2 , _a=2 , _a=0 , _a=1_0_2 , _a=True , _a=True , **_a , ) -> Union[str, Any]:
super().__init__(
pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , sep_token_id=_a , **_a , )
_a : Union[str, Any] = vocab_size
_a : List[Any] = hidden_size
_a : str = encoder_hidden_size
_a : Any = intermediate_size
_a : Tuple = projection_dim
_a : List[str] = hidden_dropout_prob
_a : int = num_hidden_layers
_a : int = num_attention_heads
_a : int = max_position_embeddings
_a : Any = layer_norm_eps
_a : Optional[int] = hidden_act
_a : Union[str, Any] = initializer_range
_a : Tuple = attention_probs_dropout_prob
_a : Dict = is_decoder
_a : Tuple = use_cache
@classmethod
def __lowercase ( cls , _a , **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
_a , _a : Optional[int] = cls.get_config_dict(_a , **_a )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
_a : Optional[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_a , **_a )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[Any] = "blip_vision_model"
def __init__( self , _a=7_6_8 , _a=3_0_7_2 , _a=5_1_2 , _a=1_2 , _a=1_2 , _a=3_8_4 , _a=1_6 , _a="gelu" , _a=1e-5 , _a=0.0 , _a=1e-1_0 , **_a , ) -> Optional[Any]:
super().__init__(**_a )
_a : str = hidden_size
_a : List[str] = intermediate_size
_a : Optional[int] = projection_dim
_a : Dict = num_hidden_layers
_a : Dict = num_attention_heads
_a : Union[str, Any] = patch_size
_a : List[Any] = image_size
_a : Tuple = initializer_range
_a : int = attention_dropout
_a : Optional[Any] = layer_norm_eps
_a : List[Any] = hidden_act
@classmethod
def __lowercase ( cls , _a , **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
_a , _a : Union[str, Any] = cls.get_config_dict(_a , **_a )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('''model_type''' ) == "blip":
_a : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_a , **_a )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : str = "blip"
UpperCAmelCase__ : str = True
def __init__( self , _a=None , _a=None , _a=5_1_2 , _a=2.6592 , _a=2_5_6 , **_a , ) -> str:
super().__init__(**_a )
if text_config is None:
_a : List[str] = {}
logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' )
if vision_config is None:
_a : str = {}
logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' )
_a : Optional[Any] = BlipTextConfig(**_a )
_a : Optional[int] = BlipVisionConfig(**_a )
_a : str = self.vision_config.hidden_size
_a : Union[str, Any] = projection_dim
_a : str = logit_scale_init_value
_a : str = 1.0
_a : int = 0.02
_a : Optional[int] = image_text_hidden_size
@classmethod
def __lowercase ( cls , _a , _a , **_a ) -> Optional[Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_a )
def __lowercase ( self ) -> Optional[int]:
_a : Tuple = copy.deepcopy(self.__dict__ )
_a : str = self.text_config.to_dict()
_a : List[str] = self.vision_config.to_dict()
_a : List[Any] = self.__class__.model_type
return output
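# Usage sketch (not part of the class above; assumes the `transformers` library):
# composing the composite BLIP config from its two sub-configs via the
# `from_text_vision_configs` classmethod defined earlier in this file.
if __name__ == "__main__":
    from transformers import BlipConfig, BlipTextConfig, BlipVisionConfig

    cfg = BlipConfig.from_text_vision_configs(BlipTextConfig() , BlipVisionConfig() )
    print(cfg.to_dict()['''model_type'''] )  # "blip"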
| 14 |
from scipy.stats import spearmanr
import datasets
a__ = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
a__ = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
a__ = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def __lowercase ( self ) -> int:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''float''' ),
'''references''': datasets.Value('''float''' ),
} ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'''] , )
def __lowercase ( self , _a , _a , _a=False ) -> str:
_a : int = spearmanr(_a , _a )
if return_pvalue:
return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
else:
return {"spearmanr": results[0]}
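# Minimal sketch of what the metric's `_compute` delegates to, reusing the
# `spearmanr` imported at the top of this file and the docstring's example data.
if __name__ == "__main__":
    rho, pvalue = spearmanr([10, 9, 2.5, 6, 4] , [1, 2, 3, 4, 5] )
    print(round(rho , 2 ) , round(pvalue , 2 ) )  # -0.7 0.19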
| 14 | 1 |
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a__ = logging.get_logger(__name__)
a__ = {
'''google/owlvit-base-patch32''': '''https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json''',
'''google/owlvit-base-patch16''': '''https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json''',
'''google/owlvit-large-patch14''': '''https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = "owlvit_text_model"
def __init__( self , _a=4_9_4_0_8 , _a=5_1_2 , _a=2_0_4_8 , _a=1_2 , _a=8 , _a=1_6 , _a="quick_gelu" , _a=1e-5 , _a=0.0 , _a=0.02 , _a=1.0 , _a=0 , _a=4_9_4_0_6 , _a=4_9_4_0_7 , **_a , ) -> int:
super().__init__(pad_token_id=_a , bos_token_id=_a , eos_token_id=_a , **_a )
_a : Tuple = vocab_size
_a : Tuple = hidden_size
_a : Optional[int] = intermediate_size
_a : Any = num_hidden_layers
_a : int = num_attention_heads
_a : Tuple = max_position_embeddings
_a : List[Any] = hidden_act
_a : str = layer_norm_eps
_a : Dict = attention_dropout
_a : Any = initializer_range
_a : Union[str, Any] = initializer_factor
@classmethod
def __lowercase ( cls , _a , **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
_a , _a : Optional[int] = cls.get_config_dict(_a , **_a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
_a : int = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_a , **_a )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : int = "owlvit_vision_model"
def __init__( self , _a=7_6_8 , _a=3_0_7_2 , _a=1_2 , _a=1_2 , _a=3 , _a=7_6_8 , _a=3_2 , _a="quick_gelu" , _a=1e-5 , _a=0.0 , _a=0.02 , _a=1.0 , **_a , ) -> Dict:
super().__init__(**_a )
_a : Dict = hidden_size
_a : Optional[Any] = intermediate_size
_a : Optional[Any] = num_hidden_layers
_a : int = num_attention_heads
_a : int = num_channels
_a : int = image_size
_a : Dict = patch_size
_a : List[str] = hidden_act
_a : Tuple = layer_norm_eps
_a : int = attention_dropout
_a : Any = initializer_range
_a : Dict = initializer_factor
@classmethod
def __lowercase ( cls , _a , **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
_a , _a : Tuple = cls.get_config_dict(_a , **_a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
_a : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_a , **_a )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[int] = "owlvit"
UpperCAmelCase__ : str = True
def __init__( self , _a=None , _a=None , _a=5_1_2 , _a=2.6592 , _a=True , **_a , ) -> Any:
super().__init__(**_a )
if text_config is None:
_a : Union[str, Any] = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
_a : List[str] = {}
            logger.info('''vision_config is None. Initializing the OwlViTVisionConfig with default values.''' )
_a : str = OwlViTTextConfig(**_a )
_a : Dict = OwlViTVisionConfig(**_a )
_a : int = projection_dim
_a : Optional[int] = logit_scale_init_value
_a : List[Any] = return_dict
_a : Optional[Any] = 1.0
@classmethod
def __lowercase ( cls , _a , **_a ) -> "PretrainedConfig":
cls._set_token_in_kwargs(_a )
_a , _a : Dict = cls.get_config_dict(_a , **_a )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_a , **_a )
@classmethod
def __lowercase ( cls , _a , _a , **_a ) -> Dict:
_a : int = {}
_a : Optional[int] = text_config
_a : Optional[Any] = vision_config
return cls.from_dict(_a , **_a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Any = copy.deepcopy(self.__dict__ )
_a : Optional[int] = self.text_config.to_dict()
_a : Optional[Any] = self.vision_config.to_dict()
_a : Union[str, Any] = self.__class__.model_type
return output
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def __lowercase ( self ) -> float:
return 1e-4
def __lowercase ( self , _a , _a = -1 , _a = -1 , _a = None , ) -> Mapping[str, Any]:
_a : Dict = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_a , seq_length=_a , framework=_a )
_a : Optional[Any] = super().generate_dummy_inputs(
processor.image_processor , batch_size=_a , framework=_a )
return {**text_input_dict, **image_input_dict}
@property
def __lowercase ( self ) -> int:
return 1_4
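# Usage sketch (assumes the `transformers` library): building the composite
# OwlViT config from its sub-config dicts, as the `from_text_vision_configs`
# classmethod above expects, then reading the projection default from __init__.
if __name__ == "__main__":
    from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

    cfg = OwlViTConfig.from_text_vision_configs(
        OwlViTTextConfig().to_dict() , OwlViTVisionConfig().to_dict() )
    print(cfg.projection_dim )  # 512 by default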
| 14 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __UpperCAmelCase ( __a : bytes ,__a : int ) -> np.array:
"""simple docstring"""
_a : int = F"""{sampling_rate}"""
_a : str = '''1'''
_a : Optional[int] = '''f32le'''
_a : Optional[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(__a ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
_a : Any = ffmpeg_process.communicate(__a )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
_a : Optional[Any] = output_stream[0]
    _a : Optional[int] = np.frombuffer(__a ,np.float32 )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
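# Self-contained sketch of the same decode path as the function above (the
# name `_decode_to_float32` is illustrative; needs an `ffmpeg` binary on PATH).
def _decode_to_float32(raw: bytes , sampling_rate: int = 16_000 ) -> np.ndarray:
    cmd = [
        '''ffmpeg''', '''-i''', '''pipe:0''', '''-ac''', '''1''', '''-ar''', str(sampling_rate ),
        '''-f''', '''f32le''', '''-hide_banner''', '''-loglevel''', '''quiet''', '''pipe:1''',
    ]
    out = subprocess.run(cmd , input=raw , stdout=subprocess.PIPE , check=True ).stdout
    return np.frombuffer(out , np.float32 )
# Example (hypothetical file path):
# with open("sample.wav", "rb") as f:
#     audio = _decode_to_float32(f.read())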
def __UpperCAmelCase ( __a : int ,__a : float ,__a : str = "f32le" ,) -> str:
"""simple docstring"""
_a : Dict = F"""{sampling_rate}"""
_a : Optional[Any] = '''1'''
if format_for_conversion == "s16le":
_a : Dict = 2
elif format_for_conversion == "f32le":
_a : Optional[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Dict = platform.system()
if system == "Linux":
_a : Dict = '''alsa'''
_a : Union[str, Any] = '''default'''
elif system == "Darwin":
_a : Union[str, Any] = '''avfoundation'''
_a : List[str] = ''':0'''
elif system == "Windows":
_a : Optional[int] = '''dshow'''
_a : str = '''default'''
_a : Tuple = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_a : str = _ffmpeg_stream(__a ,__a )
for item in iterator:
yield item
def __UpperCAmelCase ( __a : int ,__a : float ,__a : Optional[int] = None ,__a : Optional[Union[Tuple[float, float], float]] = None ,__a : str = "f32le" ,) -> Optional[int]:
"""simple docstring"""
if stream_chunk_s is not None:
_a : Tuple = stream_chunk_s
else:
_a : Tuple = chunk_length_s
_a : Tuple = ffmpeg_microphone(__a ,__a ,format_for_conversion=__a )
if format_for_conversion == "s16le":
        _a : Any = np.int16
_a : Optional[int] = 2
elif format_for_conversion == "f32le":
        _a : Dict = np.float32
_a : List[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_a : List[Any] = chunk_length_s / 6
_a : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__a ,(int, float) ):
_a : Optional[Any] = [stride_length_s, stride_length_s]
_a : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_a : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_a : Optional[Any] = datetime.datetime.now()
_a : Tuple = datetime.timedelta(seconds=__a )
for item in chunk_bytes_iter(__a ,__a ,stride=(stride_left, stride_right) ,stream=__a ):
# Put everything back in numpy scale
_a : Dict = np.frombuffer(item['''raw'''] ,dtype=__a )
_a : Dict = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
_a : str = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def __UpperCAmelCase ( __a : Optional[int] ,__a : int ,__a : Tuple[int, int] ,__a : bool = False ) -> Optional[int]:
"""simple docstring"""
_a : Any = b''''''
_a , _a : List[str] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
_a : List[str] = 0
for raw in iterator:
acc += raw
if stream and len(__a ) < chunk_len:
_a : Dict = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__a ) >= chunk_len:
# We are flushing the accumulator
_a : List[str] = (_stride_left, stride_right)
_a : List[Any] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
_a : List[Any] = False
yield item
_a : Optional[Any] = stride_left
_a : Optional[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__a ) > stride_left:
_a : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
_a : Dict = False
yield item
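# Self-contained sketch of the chunking contract implemented above (names are
# illustrative): each yielded chunk keeps the last stride_left + stride_right
# bytes of its predecessor, so consumers can drop the overlapping edges.
def _chunk_demo(pieces , chunk_len , stride_left , stride_right ):
    acc = b''''''
    step = chunk_len - stride_left - stride_right
    for raw in pieces:
        acc += raw
        while len(acc ) >= chunk_len:
            yield acc[:chunk_len]
            acc = acc[step:]
    if len(acc ) > stride_left:
        yield acc
# list(map(len, _chunk_demo([b"0123"] * 3, 8, 2, 2))) -> [8, 8, 4]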
def __UpperCAmelCase ( __a : int ,__a : int ) -> Tuple:
"""simple docstring"""
_a : Dict = 2**24 # 16Mo
try:
with subprocess.Popen(__a ,stdout=subprocess.PIPE ,bufsize=__a ) as ffmpeg_process:
while True:
_a : int = ffmpeg_process.stdout.read(__a )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 14 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
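# For reference, the supported import the deprecation message above points to
# (a sketch; any usage details beyond the import are illustrative):
# from diffusers import FlaxStableDiffusionControlNetPipeline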
| 14 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = KandinskyInpaintPipeline
UpperCAmelCase__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase__ : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase__ : Optional[int] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Any = False
@property
def __lowercase ( self ) -> Optional[int]:
return 3_2
@property
def __lowercase ( self ) -> int:
return 3_2
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def __lowercase ( self ) -> Optional[Any]:
return 1_0_0
@property
def __lowercase ( self ) -> Optional[Any]:
_a : Any = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_a : Optional[int] = MultilingualCLIP(_a )
_a : Tuple = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[str] = {
'''in_channels''': 9,
            # Out channels is double the in channels because the model predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_a : Dict = UNetaDConditionModel(**_a )
return model
@property
def __lowercase ( self ) -> Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_a : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ) -> Any:
_a : List[Any] = self.dummy_text_encoder
_a : Optional[Any] = self.dummy_tokenizer
_a : Optional[Any] = self.dummy_unet
_a : Union[str, Any] = self.dummy_movq
_a : Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
_a : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowercase ( self , _a , _a=0 ) -> int:
_a : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_a : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_a : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_a ) ).to(_a )
_a : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        _a : Optional[int] = Image.fromarray(np.uint8(_a ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
        _a : Union[str, Any] = np.ones((6_4, 6_4) , dtype=np.float32 )
_a : List[str] = 0
if str(_a ).startswith('''mps''' ):
_a : Tuple = torch.manual_seed(_a )
else:
_a : Any = torch.Generator(device=_a ).manual_seed(_a )
_a : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[Any] = '''cpu'''
_a : List[Any] = self.get_dummy_components()
_a : Tuple = self.pipeline_class(**_a )
_a : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Any = pipe(**self.get_dummy_inputs(_a ) )
_a : str = output.images
_a : Tuple = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
_a : str = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowercase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
_a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        _a : Tuple = np.ones((7_6_8, 7_6_8) , dtype=np.float32 )
_a : Any = 0
_a : Optional[Any] = '''a hat'''
_a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.float16 )
pipe_prior.to(_a )
_a : Tuple = KandinskyInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.float16 )
_a : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
_a , _a : Dict = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_a : Optional[int] = pipeline(
_a , image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
_a : Optional[int] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_a , _a )
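# Distilled, hedged usage sketch of the slow test above (needs a CUDA GPU,
# network access, and the fp16 weights; `init_image` and `mask` are the
# user-supplied PIL image and float32 mask, as in the test):
# prior = KandinskyPriorPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16).to("cuda")
# pipe = KandinskyInpaintPipeline.from_pretrained(
#     "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16).to("cuda")
# emb, neg_emb = prior("a hat", num_inference_steps=5, negative_prompt="").to_tuple()
# image = pipe("a hat", image=init_image, mask_image=mask, image_embeds=emb,
#              negative_image_embeds=neg_emb, height=768, width=768,
#              output_type="np").images[0]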
| 14 | 1 |
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
a__ = [
'''cross_validation.py''',
'''gradient_accumulation.py''',
'''local_sgd.py''',
'''multi_process_metrics.py''',
'''memory.py''',
'''automatic_gradient_accumulation.py''',
'''fsdp_with_peak_mem_tracking.py''',
'''deepspeed_with_config_support.py''',
'''megatron_lm_gpt_pretraining.py''',
]
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self , _a , _a , _a = None , _a = None ) -> List[Any]:
_a : Union[str, Any] = None
_a : Optional[Any] = os.path.abspath(os.path.join('''examples''' , '''by_feature''' ) )
_a : Optional[int] = os.path.abspath('''examples''' )
for item in os.listdir(_a ):
if item not in EXCLUDE_EXAMPLES:
_a : Any = os.path.join(_a , _a )
if os.path.isfile(_a ) and ".py" in item_path:
with self.subTest(
tested_script=_a , feature_script=_a , tested_section='''main()''' if parser_only else '''training_function()''' , ):
_a : Optional[int] = compare_against_test(
os.path.join(_a , _a ) , _a , _a , _a )
_a : Union[str, Any] = '''\n'''.join(_a )
if special_strings is not None:
for string in special_strings:
_a : Union[str, Any] = diff.replace(_a , '''''' )
self.assertEqual(_a , '''''' )
def __lowercase ( self ) -> Optional[Any]:
self.one_complete_example('''complete_nlp_example.py''' , _a )
self.one_complete_example('''complete_nlp_example.py''' , _a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Optional[int] = os.path.abspath(os.path.join('''examples''' , '''cv_example.py''' ) )
_a : int = [
''' ''' * 1_6 + '''{\n\n''',
''' ''' * 2_0 + '''"accuracy": eval_metric["accuracy"],\n\n''',
''' ''' * 2_0 + '''"f1": eval_metric["f1"],\n\n''',
''' ''' * 2_0 + '''"train_loss": total_loss.item() / len(train_dataloader),\n\n''',
''' ''' * 2_0 + '''"epoch": epoch,\n\n''',
''' ''' * 1_6 + '''},\n\n''',
''' ''' * 1_6 + '''step=epoch,\n''',
''' ''' * 1_2,
''' ''' * 8 + '''for step, batch in enumerate(active_dataloader):\n''',
]
self.one_complete_example('''complete_cv_example.py''' , _a , _a , _a )
self.one_complete_example('''complete_cv_example.py''' , _a , _a , _a )
@mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "1"} )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Any = False
@classmethod
def __lowercase ( cls ) -> List[Any]:
super().setUpClass()
_a : str = tempfile.mkdtemp()
_a : str = os.path.join(cls._tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
_a : int = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def __lowercase ( cls ) -> Optional[int]:
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def __lowercase ( self ) -> Dict:
_a : Union[str, Any] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''epoch_0''' ) ) )
def __lowercase ( self ) -> List[str]:
_a : Union[str, Any] = F"""
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
""".split()
_a : List[str] = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , '''step_2''' ) ) )
def __lowercase ( self ) -> Any:
_a : Dict = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )}
""".split()
_a : str = run_command(self._launch_args + testargs , return_stdout=_a )
self.assertNotIn('''epoch 0:''' , _a )
self.assertIn('''epoch 1:''' , _a )
def __lowercase ( self ) -> Dict:
_a : Optional[Any] = F"""
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )}
""".split()
_a : Optional[int] = run_command(self._launch_args + testargs , return_stdout=_a )
if torch.cuda.is_available():
_a : List[Any] = torch.cuda.device_count()
else:
_a : Tuple = 1
if num_processes > 1:
self.assertNotIn('''epoch 0:''' , _a )
self.assertIn('''epoch 1:''' , _a )
else:
self.assertIn('''epoch 0:''' , _a )
self.assertIn('''epoch 1:''' , _a )
@slow
def __lowercase ( self ) -> Union[str, Any]:
_a : List[str] = '''
examples/by_feature/cross_validation.py
--num_folds 2
'''.split()
with mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''0'''} ):
_a : Tuple = run_command(self._launch_args + testargs , return_stdout=_a )
_a : int = re.findall('''({.+})''' , _a )
_a : int = [r for r in results if '''accuracy''' in r][-1]
_a : Optional[Any] = ast.literal_eval(_a )
self.assertGreaterEqual(results['''accuracy'''] , 0.75 )
def __lowercase ( self ) -> str:
_a : Optional[int] = ['''examples/by_feature/multi_process_metrics.py''']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def __lowercase ( self ) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
_a : str = F"""
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
""".split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(_a , '''tracking''' ) ) )
def __lowercase ( self ) -> Optional[int]:
_a : List[str] = ['''examples/by_feature/gradient_accumulation.py''']
run_command(self._launch_args + testargs )
def __lowercase ( self ) -> List[Any]:
_a : Union[str, Any] = ['''examples/by_feature/local_sgd.py''']
run_command(self._launch_args + testargs )
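# The fixtures above boil down to a shell invocation of this shape (the config
# file is the one written by write_basic_config in setUpClass):
#   accelerate launch --config_file <tmpdir>/default_config.yml \
#       examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir <out>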
| 14 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def __UpperCAmelCase ( __a : Any ) -> List[Any]:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
a__ = parser.parse_args()
a__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
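# Hypothetical invocation of the converter above (script name and paths are
# placeholders; the flags come from the argparse block):
#   python convert_controlnet_checkpoint.py \
#       --checkpoint_path control_sd15_canny.pth \
#       --original_config_file cldm_v15.yaml \
#       --image_size 512 --dump_path ./controlnet-out --to_safetensors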
| 14 | 1 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : str ) -> List[Any]:
"""simple docstring"""
_a : Tuple = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
_a : Dict = MaskFormerConfig(backbone_config=__a )
_a : Optional[Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
_a : Optional[Any] = 847
_a : List[Any] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
_a : Union[str, Any] = 150
_a : Any = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
_a : int = 171
_a : List[str] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
_a : Dict = 133
_a : Optional[Any] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
_a : List[Any] = 19
_a : Optional[Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
_a : List[Any] = 65
_a : Dict = '''mapillary-vistas-id2label.json'''
_a : Optional[int] = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) )
_a : Tuple = {int(__a ): v for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : str = dct.pop(__a )
_a : str = val
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_a : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_a : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[int] = in_proj_weight[:dim, :]
_a : List[Any] = in_proj_bias[: dim]
_a : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
_a : Tuple = in_proj_bias[
dim : dim * 2
]
_a : int = in_proj_weight[
-dim :, :
]
_a : Optional[int] = in_proj_bias[-dim :]
# fmt: on
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ) -> List[Any]:
"""simple docstring"""
_a : Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Union[str, Any] = in_proj_weight[: hidden_size, :]
_a : List[Any] = in_proj_bias[:config.hidden_size]
_a : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Any = in_proj_bias[hidden_size : hidden_size * 2]
_a : Tuple = in_proj_weight[-hidden_size :, :]
_a : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_a : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[Any] = in_proj_weight[: hidden_size, :]
_a : Any = in_proj_bias[:config.hidden_size]
_a : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
_a : List[str] = in_proj_weight[-hidden_size :, :]
_a : int = in_proj_bias[-hidden_size :]
# fmt: on
def __UpperCAmelCase ( ) -> torch.Tensor:
"""simple docstring"""
_a : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a : Dict = Image.open(requests.get(__a ,stream=__a ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __a : str ,__a : str ,__a : str ,__a : bool = False ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[Any] = get_maskformer_config(__a )
# load original state_dict
with open(__a ,'''rb''' ) as f:
_a : str = pickle.load(__a )
_a : Union[str, Any] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_a : Any = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a ,__a ,__a )
read_in_swin_q_k_v(__a ,config.backbone_config )
read_in_decoder_q_k_v(__a ,__a )
# update to torch tensors
for key, value in state_dict.items():
_a : Optional[int] = torch.from_numpy(__a )
# load 🤗 model
_a : Dict = MaskFormerForInstanceSegmentation(__a )
model.eval()
for name, param in model.named_parameters():
print(__a ,param.shape )
_a , _a : Tuple = model.load_state_dict(__a ,strict=__a )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(__a ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_a : Union[str, Any] = prepare_img()
if "vistas" in model_name:
_a : int = 65
elif "cityscapes" in model_name:
_a : Tuple = 65_535
else:
_a : str = 255
_a : Dict = True if '''ade''' in model_name else False
_a : Optional[Any] = MaskFormerImageProcessor(ignore_index=__a ,reduce_labels=__a )
_a : Optional[Any] = image_processor(__a ,return_tensors='''pt''' )
_a : int = model(**__a )
print('''Logits:''' ,outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_a : Union[str, Any] = torch.tensor(
[[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__a ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
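# Hypothetical invocation of the converter above, using the argparse defaults
# (script name and output path are placeholders):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade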
| 14 |
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a , _a ) -> List[str]:
_a : List[Any] = name
_a : List[str] = value
_a : List[str] = weight
def __repr__( self ) -> Optional[int]:
return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""
def __lowercase ( self ) -> List[Any]:
return self.value
def __lowercase ( self ) -> int:
return self.name
def __lowercase ( self ) -> Optional[int]:
return self.weight
def __lowercase ( self ) -> Optional[Any]:
return self.value / self.weight
def __UpperCAmelCase ( __a : Optional[int] ,__a : Tuple ,__a : List[str] ) -> List[str]:
"""simple docstring"""
_a : Optional[int] = []
for i in range(len(__a ) ):
menu.append(Things(name[i] ,value[i] ,weight[i] ) )
return menu
def __UpperCAmelCase ( __a : int ,__a : Union[str, Any] ,__a : int ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = sorted(__a ,key=__a ,reverse=__a )
_a : Any = []
_a , _a : Optional[int] = 0.0, 0.0
for i in range(len(__a ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __UpperCAmelCase ( ) -> int:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
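# Worked example of the greedy selection above, assuming the obfuscated
# helpers keep their original names (build_menu, greedy, Things.get_value):
# menu = build_menu(["burger", "salad", "soda"], [80, 30, 10], [40, 10, 5])
# chosen, total_value = greedy(menu, 50, Things.get_value)
# Greedy-by-value takes burger (weight 40) then salad (weight 10), filling the
# budget of 50 exactly for a total value of 110; soda (weight 5) no longer fits.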
| 14 | 1 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def __UpperCAmelCase ( __a : int = 3 ) -> qiskit.result.counts.Counts:
"""simple docstring"""
    if not isinstance(__a , int ):
        raise TypeError('''number of qubits must be an integer.''' )
if number_of_qubits <= 0:
raise ValueError('''number of qubits must be > 0.''' )
if math.floor(__a ) != number_of_qubits:
raise ValueError('''number of qubits must be exact integer.''' )
if number_of_qubits > 10:
raise ValueError('''number of qubits too large to simulate(>10).''' )
_a : Union[str, Any] = QuantumRegister(__a ,'''qr''' )
_a : List[Any] = ClassicalRegister(__a ,'''cr''' )
_a : str = QuantumCircuit(__a ,__a )
_a : Any = number_of_qubits
for i in range(__a ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(__a ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) ,__a ,__a )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(__a ,number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(__a ,__a )
# simulate with 10000 shots
_a : Any = Aer.get_backend('''qasm_simulator''' )
_a : Optional[int] = execute(__a ,__a ,shots=10_000 )
return job.result().get_counts(__a )
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
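# Sanity-check sketch (assumes the function kept its original name through the
# renaming): the QFT of the all-zero state is a uniform superposition, so each
# of the 2**3 = 8 bitstrings should get roughly 10_000 / 8 = 1250 counts.
# counts = quantum_fourier_transform(3)
# assert sum(counts.values()) == 10_000 and len(counts) == 8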
| 14 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=3 , _a=True , _a=True , _a=0.1 , _a=0.1 , _a=2_2_4 , _a=1_0_0_0 , _a=[3, 3, 6, 4] , _a=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Tuple:
_a : Dict = parent
_a : Optional[int] = batch_size
_a : Optional[Any] = num_channels
_a : Union[str, Any] = is_training
_a : Tuple = use_labels
_a : Dict = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Dict = num_labels
_a : List[str] = image_size
_a : Dict = layer_depths
_a : str = embed_dims
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : int = None
if self.use_labels:
_a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : Dict = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> int:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_a , layer_scale_init_value=1e-5 , )
def __lowercase ( self , _a , _a , _a ) -> str:
_a : List[Any] = SwiftFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __lowercase ( self , _a , _a , _a ) -> Optional[Any]:
_a : List[str] = self.num_labels
_a : Optional[int] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_a : Union[str, Any] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> Tuple:
((_a) , (_a) , (_a)) : Optional[int] = self.prepare_config_and_inputs()
_a : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
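# Hedged sketch of the forward pass the tester above checks (assumes a
# transformers build with SwiftFormer): with a 224x224 input, the last hidden
# state has shape (batch, embed_dims[-1], 7, 7), i.e. (1, 220, 7, 7) here.
# config = SwiftFormerConfig(depths=[3, 3, 6, 4], embed_dims=[48, 56, 112, 220])
# model = SwiftFormerModel(config).eval()
# with torch.no_grad():
#     out = model(torch.randn(1, 3, 224, 224))
# print(out.last_hidden_state.shape)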
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[int] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = SwiftFormerModelTester(self )
_a : int = ConfigTester(
self , config_class=_a , has_text_modality=_a , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def __lowercase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
def __lowercase ( self ) -> Dict:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __lowercase ( self ) -> str:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_a )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> int:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = SwiftFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> int:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_a : Optional[Any] = outputs.hidden_states
_a : Union[str, Any] = 8
self.assertEqual(len(_a ) , _a ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(_a ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> str:
def _config_zero_init(_a ):
_a : List[Any] = copy.deepcopy(_a )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_a , _a , 1e-1_0 )
if isinstance(getattr(_a , _a , _a ) , _a ):
_a : int = _config_zero_init(getattr(_a , _a ) )
setattr(_a , _a , _a )
return configs_no_init
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : Dict = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> Dict:
_a : Any = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_a )
_a : Any = self.default_image_processor
_a : Any = prepare_img()
_a : Any = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[Any] = model(**_a )
# verify the logits
_a : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : int = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
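# A minimal standalone usage sketch mirroring the integration test above (the checkpoint
# name comes from the test; network access is assumed):
#   processor = ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs")
#   model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   logits = model(**inputs).logits  # (1, 1000) ImageNet-1k class scores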
| 14 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = KandinskyInpaintPipeline
UpperCAmelCase__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase__ : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase__ : Optional[int] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Any = False
@property
def __lowercase ( self ) -> Optional[int]:
return 3_2
@property
def __lowercase ( self ) -> int:
return 3_2
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def __lowercase ( self ) -> Optional[Any]:
return 1_0_0
@property
def __lowercase ( self ) -> Optional[Any]:
_a : Any = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_a : Optional[int] = MultilingualCLIP(_a )
_a : Tuple = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[str] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_a : Dict = UNetaDConditionModel(**_a )
return model
@property
def __lowercase ( self ) -> Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_a : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ) -> Any:
_a : List[Any] = self.dummy_text_encoder
_a : Optional[Any] = self.dummy_tokenizer
_a : Optional[Any] = self.dummy_unet
_a : Union[str, Any] = self.dummy_movq
_a : Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
_a : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowercase ( self , _a , _a=0 ) -> int:
_a : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_a : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_a : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_a ) ).to(_a )
_a : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a : Optional[int] = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
_a : Union[str, Any] = np.ones((6_4, 6_4) , dtype=np.floataa )
_a : List[str] = 0
if str(_a ).startswith('''mps''' ):
_a : Tuple = torch.manual_seed(_a )
else:
_a : Any = torch.Generator(device=_a ).manual_seed(_a )
_a : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[Any] = '''cpu'''
_a : List[Any] = self.get_dummy_components()
_a : Tuple = self.pipeline_class(**_a )
_a : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Any = pipe(**self.get_dummy_inputs(_a ) )
_a : str = output.images
_a : Tuple = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
_a : str = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowercase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
_a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_a : Tuple = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
_a : Any = 0
_a : Optional[Any] = '''a hat'''
_a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_a : Tuple = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
_a : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
_a , _a : Dict = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_a : Optional[int] = pipeline(
_a , image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
_a : Optional[int] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_a , _a )
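# Flow of the slow test above: KandinskyPriorPipeline turns the text prompt into CLIP
# image embeddings, and KandinskyInpaintPipeline decodes those embeddings into a
# 768x768 image, regenerating only the region selected by `mask_image`.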
| 14 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : str ) -> List[Any]:
"""simple docstring"""
_a : Tuple = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
_a : Dict = MaskFormerConfig(backbone_config=__a )
_a : Optional[Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
_a : Optional[Any] = 847
_a : List[Any] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
_a : Union[str, Any] = 150
_a : Any = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
_a : int = 171
_a : List[str] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
_a : Dict = 133
_a : Optional[Any] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
_a : List[Any] = 19
_a : Optional[Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
_a : List[Any] = 65
_a : Dict = '''mapillary-vistas-id2label.json'''
_a : Optional[int] = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) )
    _a : Tuple = {int(k): v for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
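# Each tuple above maps an original MaskFormer/Detectron2 checkpoint key (left) to its
# HF `transformers` counterpart (right); `rename_key` below pops the old key and
# re-inserts its tensor under the new name.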
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : str = dct.pop(__a )
_a : str = val
def __UpperCAmelCase ( __a : List[Any] ,__a : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : Union[str, Any] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
for i in range(len(backbone_config.depths ) ):
_a : Optional[Any] = num_features[i]
for j in range(backbone_config.depths[i] ):
# fmt: off
# read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" )
_a : Optional[int] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[int] = in_proj_weight[:dim, :]
_a : List[Any] = in_proj_bias[: dim]
_a : Optional[int] = in_proj_weight[
dim : dim * 2, :
]
_a : Tuple = in_proj_bias[
dim : dim * 2
]
_a : int = in_proj_weight[
-dim :, :
]
_a : Optional[int] = in_proj_bias[-dim :]
# fmt: on
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ) -> List[Any]:
"""simple docstring"""
    # fmt: off
    _a : Optional[int] = config.decoder_config.hidden_size
for idx in range(config.decoder_config.decoder_layers ):
# read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : Union[str, Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" )
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Union[str, Any] = in_proj_weight[: hidden_size, :]
_a : List[Any] = in_proj_bias[:config.hidden_size]
_a : Dict = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Any = in_proj_bias[hidden_size : hidden_size * 2]
_a : Tuple = in_proj_weight[-hidden_size :, :]
_a : List[Any] = in_proj_bias[-hidden_size :]
# read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
_a : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" )
_a : List[str] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_a : Optional[Any] = in_proj_weight[: hidden_size, :]
_a : Any = in_proj_bias[:config.hidden_size]
_a : List[str] = in_proj_weight[hidden_size : hidden_size * 2, :]
_a : Optional[Any] = in_proj_bias[hidden_size : hidden_size * 2]
_a : List[str] = in_proj_weight[-hidden_size :, :]
_a : int = in_proj_bias[-hidden_size :]
# fmt: on
def __UpperCAmelCase ( ) -> torch.Tensor:
"""simple docstring"""
_a : str = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
_a : Dict = Image.open(requests.get(__a ,stream=__a ).raw )
return im
@torch.no_grad()
def __UpperCAmelCase ( __a : str ,__a : str ,__a : str ,__a : bool = False ) -> Union[str, Any]:
"""simple docstring"""
_a : Optional[Any] = get_maskformer_config(__a )
# load original state_dict
with open(__a ,'''rb''' ) as f:
_a : str = pickle.load(__a )
_a : Union[str, Any] = data['''model''']
# for name, param in state_dict.items():
# print(name, param.shape)
# rename keys
_a : Any = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a ,__a ,__a )
read_in_swin_q_k_v(__a ,config.backbone_config )
read_in_decoder_q_k_v(__a ,__a )
# update to torch tensors
for key, value in state_dict.items():
_a : Optional[int] = torch.from_numpy(__a )
# load 🤗 model
_a : Dict = MaskFormerForInstanceSegmentation(__a )
model.eval()
for name, param in model.named_parameters():
print(__a ,param.shape )
_a , _a : Tuple = model.load_state_dict(__a ,strict=__a )
assert missing_keys == [
"model.pixel_level_module.encoder.model.layernorm.weight",
"model.pixel_level_module.encoder.model.layernorm.bias",
]
assert len(__a ) == 0, F"""Unexpected keys: {unexpected_keys}"""
# verify results
_a : Union[str, Any] = prepare_img()
if "vistas" in model_name:
_a : int = 65
elif "cityscapes" in model_name:
_a : Tuple = 65_535
else:
_a : str = 255
_a : Dict = True if '''ade''' in model_name else False
_a : Optional[Any] = MaskFormerImageProcessor(ignore_index=__a ,reduce_labels=__a )
_a : Optional[Any] = image_processor(__a ,return_tensors='''pt''' )
_a : int = model(**__a )
print('''Logits:''' ,outputs.class_queries_logits[0, :3, :3] )
if model_name == "maskformer-swin-tiny-ade":
_a : Union[str, Any] = torch.tensor(
[[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] )
assert torch.allclose(outputs.class_queries_logits[0, :3, :3] ,__a ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and image processor to {pytorch_dump_folder_path}""" )
Path(__a ).mkdir(exist_ok=__a )
model.save_pretrained(__a )
image_processor.save_pretrained(__a )
if push_to_hub:
print('''Pushing model and image processor to the hub...''' )
model.push_to_hub(F"""nielsr/{model_name}""" )
image_processor.push_to_hub(F"""nielsr/{model_name}""" )
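# Example invocation (script name and paths are illustrative):
#   python convert_maskformer_checkpoint.py \
#       --model_name maskformer-swin-tiny-ade \
#       --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#       --pytorch_dump_folder_path ./maskformer-swin-tiny-ade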
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''maskformer-swin-tiny-ade''',
type=str,
        help='''Name of the MaskFormer model you\'d like to convert''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
a__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 14 | 1 |
import os
import sys
import unittest
a__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
a__ = os.path.join(git_repo_path, '''src''', '''diffusers''')
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> Any:
_a : Optional[Any] = find_backend(''' if not is_torch_available():''' )
self.assertEqual(_a , '''torch''' )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
_a : str = find_backend(''' if not (is_torch_available() and is_transformers_available()):''' )
self.assertEqual(_a , '''torch_and_transformers''' )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
_a : Optional[int] = find_backend(
''' if not (is_torch_available() and is_transformers_available() and is_onnx_available()):''' )
self.assertEqual(_a , '''torch_and_transformers_and_onnx''' )
def __lowercase ( self ) -> List[Any]:
_a : List[Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('''torch''' , _a )
self.assertIn('''torch_and_transformers''' , _a )
self.assertIn('''flax_and_transformers''' , _a )
self.assertIn('''torch_and_transformers_and_onnx''' , _a )
# Likewise, we can't assert on the exact content of a key
self.assertIn('''UNet2DModel''' , objects['''torch'''] )
self.assertIn('''FlaxUNet2DConditionModel''' , objects['''flax'''] )
self.assertIn('''StableDiffusionPipeline''' , objects['''torch_and_transformers'''] )
self.assertIn('''FlaxStableDiffusionPipeline''' , objects['''flax_and_transformers'''] )
self.assertIn('''LMSDiscreteScheduler''' , objects['''torch_and_scipy'''] )
self.assertIn('''OnnxStableDiffusionPipeline''' , objects['''torch_and_transformers_and_onnx'''] )
def __lowercase ( self ) -> List[str]:
_a : Optional[int] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' )
self.assertEqual(_a , '''\nCONSTANT = None\n''' )
_a : int = create_dummy_object('''function''' , '''\'torch\'''' )
self.assertEqual(
_a , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' )
_a : Tuple = '''
class FakeClass(metaclass=DummyObject):
_backends = \'torch\'
def __init__(self, *args, **kwargs):
requires_backends(self, \'torch\')
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, \'torch\')
'''
_a : Dict = create_dummy_object('''FakeClass''' , '''\'torch\'''' )
self.assertEqual(_a , _a )
def __lowercase ( self ) -> Union[str, Any]:
_a : Dict = '''# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
CONSTANT = None
def function(*args, **kwargs):
requires_backends(function, ["torch"])
class FakeClass(metaclass=DummyObject):
_backends = ["torch"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch"])
'''
_a : List[str] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} )
self.assertEqual(dummy_files['''torch'''] , _a )
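# Context for the tests above: `create_dummy_files` regenerates the
# `utils/dummy_<backend>_objects.py` modules whose placeholder objects call
# `requires_backends` and raise a helpful error when an optional backend is missing.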
| 14 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
a__ = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = XLMProphetNetTokenizer
UpperCAmelCase__ : Optional[int] = False
UpperCAmelCase__ : List[Any] = True
def __lowercase ( self ) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
_a : List[Any] = XLMProphetNetTokenizer(_a , keep_accents=_a )
tokenizer.save_pretrained(self.tmpdirname )
def __lowercase ( self ) -> Any:
_a : Tuple = '''[PAD]'''
_a : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a )
def __lowercase ( self ) -> str:
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''[PAD]''' )
self.assertEqual(vocab_keys[1] , '''[CLS]''' )
self.assertEqual(vocab_keys[-1] , '''j''' )
self.assertEqual(len(_a ) , 1_0_1_2 )
def __lowercase ( self ) -> Union[str, Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_1_2 )
def __lowercase ( self ) -> str:
_a : Tuple = XLMProphetNetTokenizer(_a , keep_accents=_a )
_a : Union[str, Any] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(_a , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
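        # `fairseq_offset` shifts raw SentencePiece ids past the tokens reserved at the
        # start of the vocab ([PAD], [CLS], [SEP], [UNK], etc.), mirroring fairseq's layout.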
_a : Optional[int] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
_a : List[Any] = tokenizer.convert_tokens_to_ids(_a )
self.assertListEqual(
_a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, -9, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, -9, 4]
] , )
_a : List[str] = tokenizer.convert_ids_to_tokens(_a )
self.assertListEqual(
_a , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
] , )
@cached_property
def __lowercase ( self ) -> List[str]:
return XLMProphetNetTokenizer.from_pretrained('''microsoft/xprophetnet-large-wiki100-cased''' )
@slow
def __lowercase ( self ) -> Tuple:
_a : str = '''Hello World!'''
_a : Tuple = [3_5_3_8_9, 6_6_7_2, 4_9, 2]
self.assertListEqual(_a , self.big_tokenizer.encode(_a ) )
@slow
def __lowercase ( self ) -> str:
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_a , model_name='''microsoft/xprophetnet-large-wiki100-cased''' , revision='''1acad1643ddd54a44df6a1b797ada8373685d90e''' , )
| 14 | 1 |
import copy
from collections import OrderedDict
from typing import Dict, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a__ = logging.get_logger(__name__)
a__ = {
'''facebook/detr-resnet-50''': '''https://huggingface.co/facebook/detr-resnet-50/resolve/main/config.json''',
# See all DETR models at https://huggingface.co/models?filter=detr
}
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : Optional[Any] = "detr"
UpperCAmelCase__ : int = ["past_key_values"]
UpperCAmelCase__ : int = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , _a=True , _a=None , _a=3 , _a=1_0_0 , _a=6 , _a=2_0_4_8 , _a=8 , _a=6 , _a=2_0_4_8 , _a=8 , _a=0.0 , _a=0.0 , _a=True , _a="relu" , _a=2_5_6 , _a=0.1 , _a=0.0 , _a=0.0 , _a=0.02 , _a=1.0 , _a=False , _a="sine" , _a="resnet50" , _a=True , _a=False , _a=1 , _a=5 , _a=2 , _a=1 , _a=1 , _a=5 , _a=2 , _a=0.1 , **_a , ) -> Any:
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
_a : Dict = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_a , _a ):
_a : Union[str, Any] = backbone_config.get('''model_type''' )
_a : int = CONFIG_MAPPING[backbone_model_type]
_a : Optional[int] = config_class.from_dict(_a )
# set timm attributes to None
_a , _a , _a : Any = None, None, None
_a : int = use_timm_backbone
_a : int = backbone_config
_a : Optional[int] = num_channels
_a : int = num_queries
_a : Dict = d_model
_a : Optional[Any] = encoder_ffn_dim
_a : Any = encoder_layers
_a : Optional[Any] = encoder_attention_heads
_a : Dict = decoder_ffn_dim
_a : Optional[Any] = decoder_layers
_a : Optional[int] = decoder_attention_heads
_a : str = dropout
_a : List[Any] = attention_dropout
_a : Dict = activation_dropout
_a : Union[str, Any] = activation_function
_a : Optional[Any] = init_std
_a : Dict = init_xavier_std
_a : Any = encoder_layerdrop
_a : List[str] = decoder_layerdrop
_a : int = encoder_layers
_a : int = auxiliary_loss
_a : Optional[Any] = position_embedding_type
_a : str = backbone
_a : List[Any] = use_pretrained_backbone
_a : Any = dilation
# Hungarian matcher
_a : str = class_cost
_a : int = bbox_cost
_a : List[str] = giou_cost
# Loss coefficients
_a : Union[str, Any] = mask_loss_coefficient
_a : Optional[Any] = dice_loss_coefficient
_a : List[str] = bbox_loss_coefficient
_a : List[Any] = giou_loss_coefficient
_a : List[Any] = eos_coefficient
super().__init__(is_encoder_decoder=_a , **_a )
@property
def __lowercase ( self ) -> int:
return self.encoder_attention_heads
@property
def __lowercase ( self ) -> int:
return self.d_model
@classmethod
def __lowercase ( cls , _a , **_a ) -> Optional[int]:
return cls(backbone_config=_a , **_a )
def __lowercase ( self ) -> Dict[str, any]:
_a : int = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
_a : Optional[Any] = self.backbone_config.to_dict()
_a : str = self.__class__.model_type
return output
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = version.parse("1.11" )
@property
def __lowercase ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''pixel_mask''', {0: '''batch'''}),
] )
@property
def __lowercase ( self ) -> float:
return 1e-5
@property
def __lowercase ( self ) -> int:
return 1_2
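    # The three properties above drive ONNX export: the dynamic input axes, the numerical
    # tolerance used to validate exported outputs (1e-5), and the default opset (12).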
| 14 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Any = LxmertTokenizer
UpperCAmelCase__ : Optional[Any] = LxmertTokenizerFast
UpperCAmelCase__ : Any = True
UpperCAmelCase__ : Dict = True
def __lowercase ( self ) -> Union[str, Any]:
super().setUp()
_a : int = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowercase ( self , _a ) -> List[str]:
_a : Tuple = '''UNwant\u00E9d,running'''
_a : str = '''unwanted, running'''
return input_text, output_text
def __lowercase ( self ) -> List[Any]:
_a : str = self.tokenizer_class(self.vocab_file )
_a : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_a , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , [7, 4, 5, 1_0, 8, 9] )
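        # `##` marks WordPiece continuation pieces, so the lowercased, accent-stripped
        # "UNwant\u00E9d" splits into un + ##want + ##ed against the tiny vocab above.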
def __lowercase ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
_a : Optional[Any] = self.get_tokenizer()
_a : str = self.get_rust_tokenizer()
_a : Optional[Any] = '''I was born in 92000, and this is falsé.'''
_a : Optional[Any] = tokenizer.tokenize(_a )
_a : List[Any] = rust_tokenizer.tokenize(_a )
self.assertListEqual(_a , _a )
_a : List[Any] = tokenizer.encode(_a , add_special_tokens=_a )
_a : Any = rust_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
_a : Dict = self.get_rust_tokenizer()
_a : Optional[int] = tokenizer.encode(_a )
_a : Dict = rust_tokenizer.encode(_a )
self.assertListEqual(_a , _a )
| 14 | 1 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __UpperCAmelCase ( __a : bytes ,__a : int ) -> np.array:
"""simple docstring"""
_a : int = F"""{sampling_rate}"""
_a : str = '''1'''
_a : Optional[int] = '''f32le'''
_a : Optional[Any] = [
'''ffmpeg''',
'''-i''',
'''pipe:0''',
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
try:
with subprocess.Popen(__a ,stdin=subprocess.PIPE ,stdout=subprocess.PIPE ) as ffmpeg_process:
_a : Any = ffmpeg_process.communicate(__a )
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to load audio files from filename''' ) from error
_a : Optional[Any] = output_stream[0]
_a : Optional[int] = np.frombuffer(__a ,np.floataa )
if audio.shape[0] == 0:
raise ValueError('''Malformed soundfile''' )
return audio
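# Usage sketch for the decoder above (named `ffmpeg_read` upstream; an ffmpeg binary on
# PATH is required and the file path is illustrative):
#   with open("sample.flac", "rb") as f:
#       waveform = ffmpeg_read(f.read(), 16_000)  # 1-D float32 array at 16 kHz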
def __UpperCAmelCase ( __a : int ,__a : float ,__a : str = "f32le" ,) -> str:
"""simple docstring"""
_a : Dict = F"""{sampling_rate}"""
_a : Optional[Any] = '''1'''
if format_for_conversion == "s16le":
_a : Dict = 2
elif format_for_conversion == "f32le":
_a : Optional[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Dict = platform.system()
if system == "Linux":
_a : Dict = '''alsa'''
_a : Union[str, Any] = '''default'''
elif system == "Darwin":
_a : Union[str, Any] = '''avfoundation'''
_a : List[str] = ''':0'''
elif system == "Windows":
_a : Optional[int] = '''dshow'''
_a : str = '''default'''
_a : Tuple = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
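    # the chunk length is measured in *bytes*: samples per chunk (rate x seconds) times
    # bytes per sample (2 for s16le, 4 for f32le), since ffmpeg's stdout is a raw byte stream.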
_a : str = _ffmpeg_stream(__a ,__a )
for item in iterator:
yield item
def __UpperCAmelCase ( __a : int ,__a : float ,__a : Optional[int] = None ,__a : Optional[Union[Tuple[float, float], float]] = None ,__a : str = "f32le" ,) -> Optional[int]:
"""simple docstring"""
if stream_chunk_s is not None:
_a : Tuple = stream_chunk_s
else:
_a : Tuple = chunk_length_s
_a : Tuple = ffmpeg_microphone(__a ,__a ,format_for_conversion=__a )
if format_for_conversion == "s16le":
_a : Any = np.intaa
_a : Optional[int] = 2
elif format_for_conversion == "f32le":
_a : Dict = np.floataa
_a : List[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_a : List[Any] = chunk_length_s / 6
_a : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__a ,(int, float) ):
_a : Optional[Any] = [stride_length_s, stride_length_s]
_a : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_a : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_a : Optional[Any] = datetime.datetime.now()
_a : Tuple = datetime.timedelta(seconds=__a )
for item in chunk_bytes_iter(__a ,__a ,stride=(stride_left, stride_right) ,stream=__a ):
# Put everything back in numpy scale
_a : Dict = np.frombuffer(item['''raw'''] ,dtype=__a )
_a : Dict = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
_a : str = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def __UpperCAmelCase ( __a : Optional[int] ,__a : int ,__a : Tuple[int, int] ,__a : bool = False ) -> Optional[int]:
"""simple docstring"""
_a : Any = b''''''
_a , _a : List[str] = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
F"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
_a : List[str] = 0
for raw in iterator:
acc += raw
if stream and len(__a ) < chunk_len:
_a : Dict = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__a ) >= chunk_len:
# We are flushing the accumulator
_a : List[str] = (_stride_left, stride_right)
_a : List[Any] = {'''raw''': acc[:chunk_len], '''stride''': stride}
if stream:
_a : List[Any] = False
yield item
_a : Optional[Any] = stride_left
_a : Optional[Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__a ) > stride_left:
_a : Optional[Any] = {'''raw''': acc, '''stride''': (_stride_left, 0)}
if stream:
_a : Dict = False
yield item
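# Stride semantics for the iterator above: (stride_left, stride_right) counts the bytes
# at each edge of a chunk that overlap the neighbouring chunks, so downstream decoding
# can drop them and avoid duplicated audio at the seams; the first chunk carries a left
# stride of 0 and the final chunk a right stride of 0.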
def __UpperCAmelCase ( __a : int ,__a : int ) -> Tuple:
"""simple docstring"""
    _a : Dict = 2**24 # 16MB
try:
with subprocess.Popen(__a ,stdout=subprocess.PIPE ,bufsize=__a ) as ffmpeg_process:
while True:
_a : int = ffmpeg_process.stdout.read(__a )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('''ffmpeg was not found but is required to stream audio files from filename''' ) from error
| 14 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> int:
_a : Dict = '''ZinengTang/tvlt-base'''
_a : List[str] = tempfile.mkdtemp()
def __lowercase ( self , **_a ) -> int:
return TvltImageProcessor.from_pretrained(self.checkpoint , **_a )
def __lowercase ( self , **_a ) -> List[Any]:
return TvltFeatureExtractor.from_pretrained(self.checkpoint , **_a )
def __lowercase ( self ) -> Optional[int]:
shutil.rmtree(self.tmpdirname )
def __lowercase ( self ) -> Dict:
_a : Union[str, Any] = self.get_image_processor()
_a : Dict = self.get_feature_extractor()
_a : Optional[int] = TvltProcessor(image_processor=_a , feature_extractor=_a )
processor.save_pretrained(self.tmpdirname )
_a : Any = TvltProcessor.from_pretrained(self.tmpdirname )
self.assertIsInstance(processor.feature_extractor , _a )
self.assertIsInstance(processor.image_processor , _a )
def __lowercase ( self ) -> Any:
_a : Optional[Any] = self.get_image_processor()
_a : Dict = self.get_feature_extractor()
_a : Dict = TvltProcessor(image_processor=_a , feature_extractor=_a )
_a : Union[str, Any] = np.ones([1_2_0_0_0] )
_a : Dict = feature_extractor(_a , return_tensors='''np''' )
_a : Tuple = processor(audio=_a , return_tensors='''np''' )
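        # parity check: the processor should route raw audio straight to the feature
        # extractor, so both output dicts must agree key-by-key up to float tolerance.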
for key in audio_dict.keys():
self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> int:
_a : Optional[Any] = self.get_image_processor()
_a : Union[str, Any] = self.get_feature_extractor()
_a : Optional[Any] = TvltProcessor(image_processor=_a , feature_extractor=_a )
_a : List[Any] = np.ones([3, 2_2_4, 2_2_4] )
_a : int = image_processor(_a , return_tensors='''np''' )
_a : Optional[int] = processor(images=_a , return_tensors='''np''' )
for key in image_dict.keys():
self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __lowercase ( self ) -> Union[str, Any]:
_a : int = self.get_image_processor()
_a : Union[str, Any] = self.get_feature_extractor()
_a : Any = TvltProcessor(image_processor=_a , feature_extractor=_a )
_a : List[str] = np.ones([1_2_0_0_0] )
_a : Optional[int] = np.ones([3, 2_2_4, 2_2_4] )
_a : int = processor(audio=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __lowercase ( self ) -> Union[str, Any]:
_a : str = self.get_image_processor()
_a : Union[str, Any] = self.get_feature_extractor()
_a : Dict = TvltProcessor(image_processor=_a , feature_extractor=_a )
self.assertListEqual(
processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 14 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a__ = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
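
# With the lazy module installed above, a downstream import only pulls in the
# heavy torch/TF backends when the attribute is first touched. A minimal
# sketch of the effect (values follow MobileBERT's default config):
def _demo_lazy_import():
    from transformers.models.mobilebert import MobileBertConfig  # resolved lazily
    print(MobileBertConfig().hidden_size)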
| 14 |
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
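
# Quick self-check for the helper above; the expected values follow directly
# from the definition (first term "1", then "1/2", "1/3", ...):
def _demo_harmonic_series():
    assert harmonic_series("") == []
    assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]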
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
| 14 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class RetriBertConfig(PretrainedConfig):
    """Configuration class for RetriBERT models."""

    model_type = "retribert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
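
# Illustrative round trip through the dict serialization that PretrainedConfig
# provides; a sketch, not part of the original module:
def _demo_retribert_config_roundtrip():
    config = RetriBertConfig(projection_dim=256)
    clone = RetriBertConfig.from_dict(config.to_dict())
    assert clone.projection_dim == 256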
| 14 |
import argparse
import collections

import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints

from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging

logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Returns the relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Returns the K, O, Q, V parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v
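
# The reshapes above fold the separate (num_heads, head_dim) axes of a T5X
# kernel into the single hidden axis PyTorch expects. A standalone sketch;
# the sizes are assumptions chosen only for the demo:
def _demo_head_merge():
    k_tmp = np.zeros((512, 8, 64))  # (d_model, n_heads, d_head)
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    assert k.shape == (512, 512)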
def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Returns the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]

    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo
def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Returns the layer norm parameters of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False):
    """Converts the parameters from T5X-Flax to Transformers-PyTorch."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T

            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(old, i, "decoder").T

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepares a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict
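
# A tiny sanity check of the numpy to torch conversion performed above; a
# hedged sketch where the parameter dict is fabricated for illustration:
def _demo_make_state_dict():
    params = {"shared.weight": np.ones((4, 2), dtype=np.float32)}
    state_dict = make_state_dict(params, is_encoder_only=True)
    assert state_dict["encoder.embed_tokens.weight"].shape == (4, 2)
    assert isinstance(state_dict["shared.weight"], torch.Tensor)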
def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replaces the params in model with the T5X converted params."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)
def convert_t5x_checkpoint_to_pytorch(t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False):
    """Loads the config and model, converts the T5X checkpoint, and saves a PyTorch checkpoint."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
| 14 | 1 |
| 14 |
import random
import sys

import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap

usage_doc = "Usage of script: script_name <size_of_canvas:int>"

choice = [0] * 100 + [1] * 10
random.shuffle(choice)


def create_canvas(size: int) -> list[list[bool]]:
    canvas = [[False for i in range(size)] for j in range(size)]
    return canvas


def seed(canvas: list[list[bool]]) -> None:
    for i, row in enumerate(canvas):
        for j, _ in enumerate(row):
            canvas[i][j] = bool(random.getrandbits(1))


def run(canvas: list[list[bool]]) -> list[list[bool]]:
    """Runs one step of the Game of Life and returns the next generation."""
    current_canvas = np.array(canvas)
    next_gen_canvas = np.array(create_canvas(current_canvas.shape[0]))
    for r, row in enumerate(current_canvas):
        for c, pt in enumerate(row):
            next_gen_canvas[r][c] = __judge_point(
                pt, current_canvas[r - 1 : r + 2, c - 1 : c + 2]
            )

    current_canvas = next_gen_canvas
    del next_gen_canvas  # cleaning memory as we move on.
    return_canvas: list[list[bool]] = current_canvas.tolist()
    return return_canvas


def __judge_point(pt: bool, neighbours: list[list[bool]]) -> bool:
    dead = 0
    alive = 0
    # finding dead or alive neighbours count.
    for i in neighbours:
        for status in i:
            if status:
                alive += 1
            else:
                dead += 1

    # handling duplicate entry for focus pt.
    if pt:
        alive -= 1
    else:
        dead -= 1

    # running the rules of game here.
    state = pt
    if pt:
        if alive < 2:
            state = False
        elif alive == 2 or alive == 3:
            state = True
        elif alive > 3:
            state = False
    else:
        if alive == 3:
            state = True

    return state


if __name__ == "__main__":
    if len(sys.argv) != 2:
        raise Exception(usage_doc)

    canvas_size = int(sys.argv[1])
    # main working structure of this module.
    c = create_canvas(canvas_size)
    seed(c)
    fig, ax = plt.subplots()
    fig.show()
    cmap = ListedColormap(["w", "k"])
    try:
        while True:
            c = run(c)
            ax.matshow(c, cmap=cmap)
            fig.canvas.draw()
            ax.cla()
    except KeyboardInterrupt:
        # do nothing.
        pass
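
# An equivalent, vectorised one-step update using a neighbour-count
# convolution. A sketch only: it assumes scipy (an extra dependency the
# script above does not use) and a boolean numpy grid as input.
def life_step(grid: np.ndarray) -> np.ndarray:
    from scipy.signal import convolve2d

    kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
    neighbours = convolve2d(grid.astype(int), kernel, mode="same")
    return (neighbours == 3) | (grid.astype(bool) & (neighbours == 2))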
| 14 | 1 |
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Solve a*x**2 + b*x + c = 0, returning real roots as floats when possible."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution1} and {solution2}")


if __name__ == "__main__":
    main()
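
# Quick self-checks: for 5x^2 + 6x + 1, delta = 16, so the roots are -0.2 and
# -1.0; for x^2 + 1 the roots are the complex conjugate pair +/-1j:
def _demo_quadratic_roots():
    assert quadratic_roots(a=5, b=6, c=1) == (-0.2, -1.0)
    root_1, root_2 = quadratic_roots(a=1, b=0, c=1)
    assert root_1 == 1j and root_2 == -1j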
| 14 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/config.json''',
'''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json''',
'''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/config.json''',
'''funnel-transformer/medium-base''': '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json''',
'''funnel-transformer/intermediate''': (
'''https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'''
),
'''funnel-transformer/intermediate-base''': (
'''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'''
),
'''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/config.json''',
'''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json''',
'''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json''',
'''funnel-transformer/xlarge-base''': '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json''',
}
class FunnelConfig(PretrainedConfig):
    """Configuration class for Funnel Transformer models."""

    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__(self, vocab_size=30522, block_sizes=[4, 4, 4], block_repeats=None, num_decoder_layers=2, d_model=768, n_head=12, d_head=64, d_inner=3072, hidden_act="gelu_new", hidden_dropout=0.1, attention_dropout=0.1, activation_dropout=0.0, initializer_range=0.1, initializer_std=None, layer_norm_eps=1e-9, pooling_type="mean", attention_type="relative_shift", separate_cls=True, truncate_seq=True, pool_q_only=True, **kwargs):
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes) if block_repeats is None else block_repeats
        assert len(block_sizes) == len(
            self.block_repeats
        ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs)

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`."
        )

    @property
    def num_blocks(self):
        return len(self.block_sizes)

    @num_blocks.setter
    def num_blocks(self, value):
        raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`.")
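
# `attribute_map` lets the generic config names resolve to Funnel's own
# attributes. A small sketch of the effect:
def _demo_funnel_attribute_map():
    config = FunnelConfig(d_model=512, n_head=8)
    assert config.hidden_size == 512  # aliased to d_model
    assert config.num_attention_heads == 8  # aliased to n_head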
| 14 | 1 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys(state_dict):
    """Renames the original SAM keys to match the Transformers implementation."""
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
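
# A minimal illustration of the key rewriting above, on fabricated keys (the
# tensor values are integer stand-ins; only the key transformations matter):
def _demo_replace_keys():
    sample = {
        "mask_decoder.output_hypernetworks_mlps.2.layers.1.weight": 0,
        "prompt_encoder.pe_layer.positional_encoding_gaussian_matrix": 1,
    }
    renamed = replace_keys(sample)
    assert "mask_decoder.output_hypernetworks_mlps.2.layers.0.weight" in renamed
    assert "shared_image_embedding.positional_embedding" in renamed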
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
    parser.add_argument(
        "--model_name",
        default="sam_vit_h_4b8939",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether to push the model and processor to the hub after converting",
    )
    parser.add_argument(
        "--model_hub_id",
        default="ybelkada/segment-anything",
        choices=choices,
        type=str,
        help="Path to hf config.json of model to convert",
    )

    args = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
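
# Downstream usage of a converted checkpoint; a sketch where the
# "facebook/sam-vit-base" hub id is an assumption based on the published
# models, not something this script produces:
def _demo_sam_inference():
    processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
    model = SamModel.from_pretrained("facebook/sam-vit-base")
    image = Image.new("RGB", (256, 256), "white")
    inputs = processor(image, input_points=[[[128, 128]]], return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    print(outputs.iou_scores.shape)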
| 14 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    """Configuration class for MobileNetV1 models."""

    model_type = "mobilenet_v1"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
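
# How the ONNX config above is consumed; a short sketch:
def _demo_onnx_config():
    config = MobileNetV1Config()
    onnx_config = MobileNetV1OnnxConfig(config, task="image-classification")
    assert "pixel_values" in onnx_config.inputs
    assert "logits" in onnx_config.outputs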
| 14 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    test_cpu_offload = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3
@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
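
# End-to-end usage of the pipeline exercised above; a sketch reusing the
# checkpoint already referenced in the slow tests:
def _demo_ldm_text2img():
    pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
    image = pipe("A painting of a squirrel eating a burger", num_inference_steps=5).images[0]
    image.save("squirrel.png")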
| 14 |
NUMBERS_PLUS_LETTER = "Input must be a string of 8 numbers plus letter"
LOOKUP_LETTERS = "TRWAGMYFPDXBNJZSQVHLCKE"


def is_spain_national_id(spanish_id: str) -> bool:
    """Validate a Spanish national ID (DNI): 8 digits plus a check letter."""
    if not isinstance(spanish_id, str):
        msg = f"Expected string as input, found {type(spanish_id).__name__}"
        raise TypeError(msg)

    spanish_id_clean = spanish_id.replace("-", "").upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(NUMBERS_PLUS_LETTER)

    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(NUMBERS_PLUS_LETTER) from ex

    if letter.isdigit():
        raise ValueError(NUMBERS_PLUS_LETTER)

    return letter == LOOKUP_LETTERS[number % 23]
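
# Quick self-checks: 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so
# "12345678Z" is valid while any other letter is not:
def _demo_spanish_id():
    assert is_spain_national_id("12345678Z")
    assert is_spain_national_id("12345678-Z")
    assert not is_spain_national_id("12345678T")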
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 1 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_full_uneven_loop(self):
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        num_inference_steps = 50
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        # make sure that the first t is uneven
        for i, t in enumerate(scheduler.timesteps[3:]):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2574) < 1e-3
    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DPMSolverSinglestepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

        scheduler = DEISMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["midpoint", "heun"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type="dpmsolver++", solver_order=order, solver_type=solver_type,
                        )
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_solver_order_and_type(self):
        for algorithm_type in ["dpmsolver", "dpmsolver++"]:
            for solver_type in ["midpoint", "heun"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"
    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_lambda_min_clipped(self):
        self.check_over_configs(lambda_min_clipped=-float("inf"))
        self.check_over_configs(lambda_min_clipped=-5.1)
    def test_variance_type(self):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="learned_range")
    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2791) < 1e-3

    def test_full_loop_with_karras(self):
        sample = self.full_loop(use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.2248) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.1453) < 1e-3

    def test_full_loop_with_karras_and_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction", use_karras_sigmas=True)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.0649) < 1e-3
    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
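
# Minimal standalone use of the scheduler under test; a sketch showing one
# denoising step with dummy tensors:
def _demo_scheduler_step():
    scheduler = DPMSolverSinglestepScheduler()
    scheduler.set_timesteps(10)
    sample = torch.zeros(1, 3, 8, 8)
    noise_pred = torch.zeros_like(sample)
    out = scheduler.step(noise_pred, scheduler.timesteps[0], sample).prev_sample
    assert out.shape == sample.shape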
| 14 |
from random import randint
from tempfile import TemporaryFile

import numpy as np


def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution"
    "is :"
)
print(z)
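
# Verification sketch: the sort is in place, so comparing against Python's
# sorted() on a fresh copy confirms correctness:
def _demo_quick_sort():
    data = [5, 3, 8, 1, 9, 2]
    comparisons = _in_place_quick_sort(data, 0, len(data) - 1)
    assert data == sorted([5, 3, 8, 1, 9, 2])
    print("comparisons:", comparisons)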
| 14 | 1 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
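
# Typical usage with pre-tokenized words and boxes; a sketch where the
# "microsoft/layoutxlm-base" checkpoint and the `apply_ocr=False` kwarg follow
# the usual LayoutXLM examples and are assumptions here:
def _demo_layoutxlm_processor():
    from PIL import Image

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base", apply_ocr=False)
    image = Image.new("RGB", (224, 224), "white")
    encoding = processor(image, text=["hello", "world"], boxes=[[1, 2, 3, 4], [5, 6, 7, 8]], return_tensors="pt")
    print(list(encoding.keys()))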
| 14 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
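
# Round-trip sketch with the published checkpoint; the hub id
# "alibaba-damo/mgp-str-base" is an assumption mirroring the model card:
def _demo_mgpstr_roundtrip():
    tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
    ids = tokenizer("tester")["input_ids"]
    assert tokenizer.decode(ids).replace(" ", "") == "tester"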
| 14 | 1 |
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
REPO_PATH = "."
# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
'''Assert''',
'''AssignVariableOp''',
'''EmptyTensorList''',
'''MergeV2Checkpoints''',
'''ReadVariableOp''',
'''ResourceGather''',
'''RestoreV2''',
'''SaveV2''',
'''ShardedFilename''',
'''StatefulPartitionedCall''',
'''StaticRegexFullMatch''',
'''VarHandleOp''',
]
def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops))
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--saved_model_path''', help='''Path of the saved model to check (the .pb file).''')
parser.add_argument(
'''--opset''', default=12, type=int, help='''The ONNX opset against which the model has to be tested.'''
)
parser.add_argument(
'''--framework''', choices=['''onnx'''], default='''onnx''', help='''Frameworks against which to test the saved model.'''
)
parser.add_argument(
'''--strict''', action='''store_true''', help='''Whether make the checking strict (raise errors) or not (raise warnings)'''
)
    args = parser.parse_args()
if args.framework == "onnx":
onnx_compliancy(args.saved_model_path, args.strict, args.opset)
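# A minimal programmatic usage sketch (hypothetical path; opset and flags mirror the CLI
# defaults above):
#
#   onnx_compliancy("path/to/saved_model.pb", strict=False, opset=12)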
| 14 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_image_processor_from_model_shortcut(self):
        image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(image_processor.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_image_processor_from_local_directory_from_preprocessor_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            image_processor = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(image_processor, CLIPImageProcessor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            image_processor = AutoImageProcessor.from_pretrained("clip-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            image_processor = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")
    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 14 | 1 |
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]
class TokenizedDataset(IterableDataset):
    """Tokenize and preprocess the dataset, yielding each prompt `n_copies` times."""
    def __init__(self, tokenizer, dataset, n_tasks=None, n_copies=1):
        self.tokenizer = tokenizer
        self.dataset = dataset
        self.n_tasks = len(dataset) if n_tasks is None else n_tasks
        self.n_copies = n_copies

    def __iter__(self):
        prompts = []
        for task in range(self.n_tasks):
            # without strip, the model generates commented code ...
            prompts.append(self.tokenizer.eos_token + self.dataset[task]["prompt"].strip())
        outputs = self.tokenizer(prompts, padding=True, return_tensors="pt")
        for task in range(self.n_tasks):
            for _ in range(self.n_copies):
                yield {
                    "ids": outputs.input_ids[task],
                    "task_id": task,
                    "input_len": outputs.attention_mask[task].sum(),
                }
class EndOfFunctionCriteria(StoppingCriteria):
    """Custom `StoppingCriteria` which checks whether all generated sequences hit an end-of-function string."""
    def __init__(self, start_length, eof_strings, tokenizer):
        self.start_length = start_length
        self.eof_strings = eof_strings
        self.tokenizer = tokenizer

    def __call__(self, input_ids, scores, **kwargs):
        """Returns True if all generated sequences contain any of the end-of-function strings."""
        decoded_generations = self.tokenizer.batch_decode(input_ids[:, self.start_length :])
        done = []
        for decoded_generation in decoded_generations:
            done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings))
        return all(done)
def remove_last_block(string):
    """Remove the last block of the generated code containing an EOF string."""
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    # last string should be ""
    return "".join(string_list[:-2])
def complete_code(accelerator, model, tokenizer, dataloader, n_tasks, batch_size=20, **gen_kwargs):
    """Generate multiple completions for each task in the dataset, using `accelerator` to distribute the work."""
    gen_token_dict = defaultdict(list)  # dict of list of generated tokens
    for step, batch in tqdm(enumerate(dataloader)):
        with torch.no_grad():
            gen_kwargs["stopping_criteria"][0].start_length = batch["ids"].shape[-1]
            generated_tokens = accelerator.unwrap_model(model).generate(
                input_ids=batch["ids"][:, : batch["input_len"]], num_return_sequences=batch_size, **gen_kwargs
            )
            # each task is generated batch_size times
            generated_tasks = batch["task_id"].repeat(batch_size)
            generated_tokens = accelerator.pad_across_processes(
                generated_tokens, dim=1, pad_index=tokenizer.pad_token_id
            )

            generated_tokens, generated_tasks = accelerator.gather((generated_tokens, generated_tasks))
            generated_tokens = generated_tokens.cpu().numpy()
            generated_tasks = generated_tasks.cpu().numpy()

            for task, generated_tokens in zip(generated_tasks, generated_tokens):
                gen_token_dict[task].append(generated_tokens)

    code_gens = [[] for _ in range(n_tasks)]
    for task, generated_tokens in gen_token_dict.items():
        for s in generated_tokens:
            gen_code = tokenizer.decode(s, skip_special_tokens=True, clean_up_tokenization_spaces=True)
            code_gens[task].append(remove_last_block(gen_code))
    return code_gens
def main():
    # Setup configuration
    parser = HfArgumentParser(HumanEvalArguments)
    args = parser.parse_args()

    transformers.logging.set_verbosity_error()
    # enables code execution in code_eval metric
    os.environ["HF_ALLOW_CODE_EVAL"] = args.HF_ALLOW_CODE_EVAL
    # make sure tokenizer plays nice with multiprocessing
    os.environ["TOKENIZERS_PARALLELISM"] = "false"

    if args.num_workers is None:
        args.num_workers = multiprocessing.cpu_count()

    # Use dataset load to feed to accelerate
    accelerator = Accelerator()
    set_seed(args.seed, device_specific=True)

    # Load model and tokenizer
    tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)

    # Generation settings
    gen_kwargs = {
        "do_sample": args.do_sample,
        "temperature": args.temperature,
        "max_new_tokens": args.max_new_tokens,
        "top_p": args.top_p,
        "top_k": args.top_k,
        "stopping_criteria": StoppingCriteriaList([EndOfFunctionCriteria(0, EOF_STRINGS, tokenizer)]),
    }

    # Load evaluation dataset and metric
    human_eval = load_dataset("openai_humaneval")
    code_eval_metric = load_metric("code_eval")

    n_tasks = args.num_tasks if args.num_tasks is not None else len(human_eval["test"])
    n_copies = args.n_samples // args.batch_size

    human_eval_tokenized = TokenizedDataset(tokenizer, human_eval["test"], n_copies=n_copies, n_tasks=n_tasks)
    # do not confuse args.batch_size, which is actually the num_return_sequences
    human_eval_loader = DataLoader(human_eval_tokenized, batch_size=1)

    # Run a quick test to see if code evaluation is enabled
    try:
        _ = code_eval_metric.compute(references=[""], predictions=[[""]])
    except ValueError as exception:
        print(
            'Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'
            " flag to enable code evaluation."
        )
        raise exception

    model, human_eval_loader = accelerator.prepare(model, human_eval_loader)

    code_gens = complete_code(
        accelerator,
        model,
        tokenizer,
        human_eval_loader,
        n_tasks=n_tasks,
        batch_size=args.batch_size,
        **gen_kwargs,
    )

    if accelerator.is_main_process:
        references = []

        for task in tqdm(range(n_tasks)):
            test_func = human_eval["test"][task]["test"]
            entry_point = f"check({human_eval['test'][task]['entry_point']})"
            references.append("\n" + test_func + "\n" + entry_point)

        # Evaluate completions with "code_eval" metric
        pass_at_k, _ = code_eval_metric.compute(
            references=references, predictions=code_gens, num_workers=args.num_workers
        )
        print(f"Results: {pass_at_k}")

        # Save results to json file
        with open(args.output_file, "w") as fp:
            json.dump(pass_at_k, fp)
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
| 14 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    """
    Returns True if this is a valid binary search tree, False otherwise.
    """

    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
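# A minimal usage sketch (constructed example, not part of the original module):
#
#   tree = TreeNode(2.0, TreeNode(1.0), TreeNode(3.0))
#   assert is_binary_search_tree(tree)
#
#   bad_tree = TreeNode(2.0, TreeNode(5.0), TreeNode(3.0))
#   assert not is_binary_search_tree(bad_tree)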
| 14 | 1 |
def harmonic_series(n_term: str) -> list:
    """
    Returns the harmonic series "1, 1/2, 1/3, ..., 1/n" up to the nth term.
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
print(harmonic_series(nth_term))
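# Example (mirrors the function's behaviour): harmonic_series("3") -> ['1', '1/2', '1/3'].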
| 14 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """
    Performs the successive Koch iterations starting from the initial vectors.
    """
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """
    Replaces each straight segment by four shorter ones forming the Koch "bump".
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """
    Rotates a 2D vector counter-clockwise by the given angle (in degrees).
    """
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """
    Plots the given list of 2D points with matplotlib.
    """
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
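# Cost note (derived from iteration_step, not part of the original script): each step
# replaces every segment with four shorter ones, so after k steps the initial 3 segments
# become 3 * 4**k, i.e. len(iterate(INITIAL_VECTORS, 5)) == 3 * 4**5 + 1 == 3073 points.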
| 14 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue_model_parallelism.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "roberta-large",
"instance_type": "ml.p3dn.24xlarge",
"results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2},
},
] )
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        smp_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }

        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}

        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(1,)] )
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
# run training
estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _a )
| 14 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_entity_name = f"{language}:{entity_name}"
            new_mapping[new_entity_name] = entity_id
    return new_mapping
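# The loader above expects a JSON-lines file (one entry per line); a hypothetical
# two-line example:
#
#   {"id": 0, "entities": [["[MASK]", "en"]]}
#   {"id": 1, "entities": [["Tokyo", "en"], ["Japon", "fr"]]}
#
# which would map to {"[MASK]": 0, "en:Tokyo": 1, "fr:Japon": 1}.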
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 14 | 1 |
def bfs(graph, s, t, parent):
    """Returns True if there is an augmenting path from s to t in the residual graph."""
    visited = [False] * len(graph)
    queue = []
    queue.append(s)
    visited[s] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[t]


def ford_fulkerson(graph, source, sink):
    """Computes the maximum flow from source to sink, finding augmenting paths with BFS (Edmonds-Karp)."""
    parent = [-1] * (len(graph))
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow
graph = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
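# Sanity check (worked out by hand on the classic CLRS example network above): the
# maximum flow from node 0 to node 5 is 23, so this script should print 23.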
| 14 |
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '''
The Spearman rank-order correlation coefficient is a measure of the
relationship between two datasets. Like other correlation coefficients,
this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so
does data in dataset y. Negative correlations imply that as x increases,
y decreases. Correlations of -1 or +1 imply an exact monotonic relationship.
Unlike the Pearson correlation, the Spearman correlation does not
assume that both datasets are normally distributed.
The p-value roughly indicates the probability of an uncorrelated system
producing datasets that have a Spearman correlation at least as extreme
as the one computed from these datasets. The p-values are not entirely
reliable but are probably reasonable for datasets larger than 500 or so.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`List[float]`): Predicted labels, as returned by a model.
references (`List[float]`): Ground truth labels.
return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
only the spearmanr score. Defaults to `False`.
Returns:
spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: only returned if `return_pvalue=True` is passed.
Examples:
Example 1:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
>>> print(results)
{\'spearmanr\': -0.7}
Example 2:
>>> spearmanr_metric = datasets.load_metric("spearmanr")
>>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
... predictions=[10, 9, 2.5, 6, 4],
... return_pvalue=True)
>>> print(results[\'spearmanr\'])
-0.7
>>> print(round(results[\'spearmanr_pvalue\'], 2))
0.19
'''
_CITATION = R'''\
@book{kokoska2000crc,
title={CRC standard probability and statistics tables and formulae},
author={Kokoska, Stephen and Zwillinger, Daniel},
year={2000},
publisher={Crc Press}
}
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 14 | 1 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
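# Illustration (hypothetical checkpoint key, traced through the function above):
# rename_keys turns an original key such as "backbone.patch_embed1.proj.weight"
# into "segformer.encoder.patch_embeddings.0.proj.weight".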
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
"""simple docstring"""
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if size == "b0":
pass
elif size == "b1":
_a : str = [64, 128, 320, 512]
_a : Optional[int] = 256
elif size == "b2":
_a : List[str] = [64, 128, 320, 512]
_a : Any = 768
_a : Dict = [3, 4, 6, 3]
elif size == "b3":
_a : Any = [64, 128, 320, 512]
_a : List[Any] = 768
_a : List[Any] = [3, 4, 18, 3]
elif size == "b4":
_a : Dict = [64, 128, 320, 512]
_a : int = 768
_a : List[Any] = [3, 8, 27, 3]
elif size == "b5":
_a : Any = [64, 128, 320, 512]
_a : List[str] = 768
_a : Optional[int] = [3, 6, 40, 3]
else:
raise ValueError(F"""Size {size} not supported""" )
    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1_372E01, -1.2_787E01, -1.3_477E01],
[-1.2_536E01, -1.4_194E01, -1.4_409E01],
[-1.3_217E01, -1.4_888E01, -1.5_327E01],
],
[
[-1.4_791E01, -1.7_122E01, -1.8_277E01],
[-1.7_163E01, -1.9_192E01, -1.9_533E01],
[-1.7_897E01, -1.9_991E01, -2.0_315E01],
],
[
[7.6_723E-01, 4.1_921E-01, -7.7_878E-02],
[4.7_772E-01, 9.5_557E-03, -2.8_082E-01],
[3.6_032E-01, -2.4_826E-01, -5.1_168E-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
else:
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-2)
# finally, save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 14 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
def __UpperCAmelCase ( __a : int ,__a : float ,__a : str = "f32le" ,) -> str:
"""simple docstring"""
_a : Dict = F"""{sampling_rate}"""
_a : Optional[Any] = '''1'''
if format_for_conversion == "s16le":
_a : Dict = 2
elif format_for_conversion == "f32le":
_a : Optional[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Dict = platform.system()
if system == "Linux":
_a : Dict = '''alsa'''
_a : Union[str, Any] = '''default'''
elif system == "Darwin":
_a : Union[str, Any] = '''avfoundation'''
_a : List[str] = ''':0'''
elif system == "Windows":
_a : Optional[int] = '''dshow'''
_a : str = '''default'''
_a : Tuple = [
'''ffmpeg''',
'''-f''',
format_,
'''-i''',
input_,
'''-ac''',
ac,
'''-ar''',
ar,
'''-f''',
format_for_conversion,
'''-fflags''',
'''nobuffer''',
'''-hide_banner''',
'''-loglevel''',
'''quiet''',
'''pipe:1''',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_a : str = _ffmpeg_stream(__a ,__a )
for item in iterator:
yield item
def __UpperCAmelCase ( __a : int ,__a : float ,__a : Optional[int] = None ,__a : Optional[Union[Tuple[float, float], float]] = None ,__a : str = "f32le" ,) -> Optional[int]:
"""simple docstring"""
if stream_chunk_s is not None:
_a : Tuple = stream_chunk_s
else:
_a : Tuple = chunk_length_s
_a : Tuple = ffmpeg_microphone(__a ,__a ,format_for_conversion=__a )
if format_for_conversion == "s16le":
_a : Any = np.intaa
_a : Optional[int] = 2
elif format_for_conversion == "f32le":
_a : Dict = np.floataa
_a : List[Any] = 4
else:
raise ValueError(F"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_a : List[Any] = chunk_length_s / 6
_a : Optional[int] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__a ,(int, float) ):
_a : Optional[Any] = [stride_length_s, stride_length_s]
_a : Optional[Any] = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_a : str = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_a : Optional[Any] = datetime.datetime.now()
_a : Tuple = datetime.timedelta(seconds=__a )
for item in chunk_bytes_iter(__a ,__a ,stride=(stride_left, stride_right) ,stream=__a ):
# Put everything back in numpy scale
_a : Dict = np.frombuffer(item['''raw'''] ,dtype=__a )
_a : Dict = (
item['''stride'''][0] // size_of_sample,
item['''stride'''][1] // size_of_sample,
)
_a : str = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and yields chunks of length `chunk_len` with the requested `stride` overlap.
    If `stream` is True, partial results are returned before a full `chunk_len` is available.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function to create the generator of raw data chunks read from an ffmpeg subprocess.
    """
    bufsize = 2**24  # 16MB
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
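# A small self-contained illustration of chunk_bytes_iter (constructed example): with a
# 4-byte chunk and a (1, 1) stride, consecutive chunks overlap by one byte on each side.
#
#   chunks = list(chunk_bytes_iter(iter([b"abcdefgh"]), 4, stride=(1, 1)))
#   # chunks[0] == {"raw": b"abcd", "stride": (0, 1)}
#   # chunks[1] == {"raw": b"cdef", "stride": (1, 1)}
#   # chunks[2] == {"raw": b"efgh", "stride": (1, 1)}
#   # chunks[3] == {"raw": b"gh",   "stride": (1, 0)}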
| 14 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively( hf_pointer ,key ,value ,full_name ,weight_type ) -> None:
    """simple docstring"""
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer ,attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer ,weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            F""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(F"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def recursively_load_weights( fairseq_model ,hf_model ,is_headless ) -> None:
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wavaveca_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name ,value ,feature_extractor ,unused_weights ,hf_model.config.feat_extract_norm == '''group''' ,)
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''wav2vec2_conformer.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' ,layer_index )
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    elif "running_mean" in name:
                        weight_type = '''running_mean'''
                    elif "inv_freq" in name:
                        weight_type = '''inv_freq'''
                    elif "running_var" in name:
                        weight_type = '''running_var'''
                    elif "num_batches_tracked" in name:
                        weight_type = '''num_batches_tracked'''
                    else:
                        weight_type = None
                    set_recursively(hf_model ,mapped_key ,value ,name ,weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F"""Unused weights: {unused_weights}""" )
def load_conv_layer( full_name ,value ,feature_extractor ,unused_weights ,use_group_norm ) -> None:
    """simple docstring"""
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F"""Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F"""{full_name} has size {value.shape}, but"""
                    F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wavaveca_conformer_checkpoint( checkpoint_path ,pytorch_dump_folder_path ,config_path=None ,dict_path=None ,is_finetuned=True ) -> None:
    """simple docstring"""
    if config_path is not None:
        config = WavaVecaConformerConfig.from_pretrained(config_path ,hidden_act='''swish''' )
    else:
        config = WavaVecaConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = '''rotary'''
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path ,'''vocab.json''' )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path ,exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['''<pad>'''] = 0
            vocab_dict['''<s>'''] = 1
            with open(vocab_path ,'''w''' ,encoding='''utf-8''' ) as vocab_handle:
                json.dump(vocab_dict ,vocab_handle )
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token='''|''' ,do_lower_case=False ,)
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=True ,return_attention_mask=return_attention_mask ,)
            processor = WavaVecaProcessor(feature_extractor=feature_extractor ,tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wavavec = WavaVecaConformerForCTC(config )
    else:
        hf_wavavec = WavaVecaConformerForPreTraining(config )
    if is_finetuned:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] ,arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task='''audio_pretraining''' )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ,task=task )
    model = model[0].eval()
    recursively_load_weights(model ,hf_wavavec ,not is_finetuned )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_wavaveca_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
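    # Example invocation (illustrative; the script name and all paths are placeholders):
    #   python convert_wav2vec2_conformer_checkpoint.py \
    #       --checkpoint_path /path/to/conformer_checkpoint.pt \
    #       --pytorch_dump_folder_path ./wav2vec2-conformer-hf \
    #       --config_path ./config.json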
| 14 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : Union[str, Any] = KandinskyInpaintPipeline
UpperCAmelCase__ : Optional[int] = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
UpperCAmelCase__ : Optional[Any] = [
"prompt",
"negative_prompt",
"image_embeds",
"negative_image_embeds",
"image",
"mask_image",
]
UpperCAmelCase__ : Optional[int] = [
"generator",
"height",
"width",
"latents",
"guidance_scale",
"negative_prompt",
"num_inference_steps",
"return_dict",
"guidance_scale",
"num_images_per_prompt",
"output_type",
"return_dict",
]
UpperCAmelCase__ : Any = False
@property
def __lowercase ( self ) -> Optional[int]:
return 3_2
@property
def __lowercase ( self ) -> int:
return 3_2
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim
@property
def __lowercase ( self ) -> List[str]:
return self.time_input_dim * 4
@property
def __lowercase ( self ) -> Optional[Any]:
return 1_0_0
@property
def __lowercase ( self ) -> Optional[Any]:
_a : Any = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[Any] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
_a : Optional[int] = MultilingualCLIP(_a )
_a : Tuple = text_encoder.eval()
return text_encoder
@property
def __lowercase ( self ) -> str:
torch.manual_seed(0 )
_a : List[str] = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
_a : Dict = UNetaDConditionModel(**_a )
return model
@property
def __lowercase ( self ) -> Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowercase ( self ) -> Tuple:
torch.manual_seed(0 )
_a : List[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowercase ( self ) -> Any:
_a : List[Any] = self.dummy_text_encoder
_a : Optional[Any] = self.dummy_tokenizer
_a : Optional[Any] = self.dummy_unet
_a : Union[str, Any] = self.dummy_movq
_a : Tuple = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule='''linear''' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_a , set_alpha_to_one=_a , steps_offset=1 , prediction_type='''epsilon''' , thresholding=_a , )
_a : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowercase ( self , _a , _a=0 ) -> int:
_a : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_a ) ).to(_a )
_a : List[str] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_a )
# create init_image
_a : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(_a ) ).to(_a )
_a : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a : Optional[int] = Image.fromarray(np.uinta(_a ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
# create mask
_a : Union[str, Any] = np.ones((6_4, 6_4) , dtype=np.floataa )
_a : List[str] = 0
if str(_a ).startswith('''mps''' ):
_a : Tuple = torch.manual_seed(_a )
else:
_a : Any = torch.Generator(device=_a ).manual_seed(_a )
_a : Any = {
'''prompt''': '''horse''',
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowercase ( self ) -> Optional[Any]:
_a : Optional[Any] = '''cpu'''
_a : List[Any] = self.get_dummy_components()
_a : Tuple = self.pipeline_class(**_a )
_a : int = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
_a : Any = pipe(**self.get_dummy_inputs(_a ) )
_a : str = output.images
_a : Tuple = pipe(
**self.get_dummy_inputs(_a ) , return_dict=_a , )[0]
_a : Union[str, Any] = image[0, -3:, -3:, -1]
_a : Tuple = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 6_4, 6_4, 3)
_a : str = np.array(
[0.832_6919, 0.7379_0467, 0.2091_8581, 0.930_9612, 0.551_1791, 0.4371_3328, 0.551_3321, 0.4992_2934, 0.5949_7786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowercase ( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self ) -> str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowercase ( self ) -> Union[str, Any]:
_a : Tuple = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy''' )
_a : str = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
_a : Tuple = np.ones((7_6_8, 7_6_8) , dtype=np.floataa )
_a : Any = 0
_a : Optional[Any] = '''a hat'''
_a : Optional[Any] = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_a )
_a : Tuple = KandinskyInpaintPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-inpaint''' , torch_dtype=torch.floataa )
_a : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
_a : Union[str, Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
_a , _a : Dict = pipe_prior(
_a , generator=_a , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
_a : Optional[int] = pipeline(
_a , image=_a , mask_image=_a , image_embeds=_a , negative_image_embeds=_a , generator=_a , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , output_type='''np''' , )
_a : Optional[int] = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_a , _a )
| 14 | 1 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
'''text-classification''',
'''language-modeling''',
'''summarization''',
'''token-classification''',
'''question-answering''',
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
import run_ta_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file() -> str:
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''-f''' )
    args = parser.parse_args()
    return args.f


def get_results(output_dir: str ,split: str = "eval") -> dict:
    """simple docstring"""
    path = os.path.join(output_dir ,F"""{split}_results.json""" )
    if os.path.exists(path ):
        with open(path ,'''r''' ) as f:
            return json.load(f )
    raise ValueError(F"""can't find {path}""" )
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __lowercase ( self ) -> Optional[int]:
_a : List[str] = self.get_auto_remove_tmp_dir()
_a : Union[str, Any] = F"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--eval_steps=2
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_glue.main()
_a : List[Any] = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
@slow
def __lowercase ( self ) -> int:
_a : Optional[Any] = self.get_auto_remove_tmp_dir()
_a : Union[str, Any] = F"""
run_clm_flax.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_clm_flax.main()
_a : List[Any] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 1_0_0 )
@slow
def __lowercase ( self ) -> List[Any]:
_a : str = self.get_auto_remove_tmp_dir()
_a : List[Any] = F"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--test_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=8
--do_train
--do_eval
--do_predict
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(_a , '''argv''' , _a ):
run_summarization_flax.main()
_a : Optional[Any] = get_results(_a , split='''test''' )
self.assertGreaterEqual(result['''test_rouge1'''] , 1_0 )
self.assertGreaterEqual(result['''test_rouge2'''] , 2 )
self.assertGreaterEqual(result['''test_rougeL'''] , 7 )
self.assertGreaterEqual(result['''test_rougeLsum'''] , 7 )
@slow
def __lowercase ( self ) -> Dict:
_a : str = self.get_auto_remove_tmp_dir()
_a : str = F"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--logging_steps 2 --eval_steps 2
--do_train
--do_eval
--num_train_epochs=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_mlm_flax.main()
_a : List[Any] = get_results(_a )
self.assertLess(result['''eval_perplexity'''] , 4_2 )
@slow
def __lowercase ( self ) -> Union[str, Any]:
_a : List[Any] = self.get_auto_remove_tmp_dir()
_a : Tuple = F"""
run_t5_mlm_flax.py
--model_name_or_path t5-small
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--max_seq_length 128
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--num_train_epochs 2
--logging_steps 2 --eval_steps 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
with patch.object(_a , '''argv''' , _a ):
run_ta_mlm_flax.main()
_a : Union[str, Any] = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.42 )
@slow
def __lowercase ( self ) -> Tuple:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
_a : Optional[int] = 7 if get_gpu_count() > 1 else 2
_a : str = self.get_auto_remove_tmp_dir()
_a : int = F"""
run_flax_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--logging_steps 2 --eval_steps 2
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
with patch.object(_a , '''argv''' , _a ):
run_flax_ner.main()
_a : List[str] = get_results(_a )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertGreaterEqual(result['''eval_f1'''] , 0.3 )
@slow
def __lowercase ( self ) -> Optional[Any]:
_a : Tuple = self.get_auto_remove_tmp_dir()
_a : Tuple = F"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--num_train_epochs=3
--warmup_steps=2
--do_train
--do_eval
--logging_steps 2 --eval_steps 2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(_a , '''argv''' , _a ):
run_qa.main()
_a : Any = get_results(_a )
self.assertGreaterEqual(result['''eval_f1'''] , 3_0 )
self.assertGreaterEqual(result['''eval_exact'''] , 3_0 )
| 14 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
    def parse_bool(string: str) -> bool:
        """simple docstring"""
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
    args = parser.parse_args()
    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 14 | 1 |
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result() -> None:
    """simple docstring"""
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes ,edges )
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(result ) == sorted(expected )
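# Both sides are passed through `sorted` because the expected MST here (eight edges,
# total weight 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37) is compared as a set: the order in
# which Kruskal's algorithm emits the edges is an implementation detail.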
| 14 |
class Things:
    """simple docstring"""

    def __init__( self , name , value , weight ) -> None:
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__( self ) -> str:
        return F"""{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"""

    def get_value( self ):
        return self.value

    def get_name( self ):
        return self.name

    def get_weight( self ):
        return self.weight

    def value_weight( self ):
        return self.value / self.weight


def build_menu( name , value , weight ) -> list:
    """simple docstring"""
    menu : list = []
    for i in range(len(value ) ):
        menu.append(Things(name[i] ,value[i] ,weight[i] ) )
    return menu


def greedy( items , max_cost , key_func ) -> tuple:
    """simple docstring"""
    items_copy = sorted(items ,key=key_func ,reverse=True )
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy ) ):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i] )
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
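# A minimal usage sketch of the helpers above (the food names and numbers are illustrative):
#
#   menu = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 45, 30])
#   chosen, total_value = greedy(menu, max_cost=60, key_func=Things.get_value)
#
# Sorting by value picks Pizza (weight 45) first, then skips anything that would push
# the running weight past the 60-unit budget, so `chosen` holds only the Pizza item.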
def test_greedy() -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
| 14 | 1 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=3_0 , _a=2 , _a=3 , _a=True , _a=True , _a=3_2 , _a=5 , _a=4 , _a=3_7 , _a="gelu" , _a=0.1 , _a=0.1 , _a=1_0 , _a=0.02 , _a=None , _a=2 , ) -> List[Any]:
_a : List[Any] = parent
_a : Tuple = batch_size
_a : Any = image_size
_a : Tuple = patch_size
_a : List[Any] = num_channels
_a : int = is_training
_a : List[str] = use_labels
_a : List[str] = hidden_size
_a : Any = num_hidden_layers
_a : Dict = num_attention_heads
_a : List[Any] = intermediate_size
_a : Optional[int] = hidden_act
_a : str = hidden_dropout_prob
_a : int = attention_probs_dropout_prob
_a : Tuple = type_sequence_label_size
_a : int = initializer_range
_a : str = scope
_a : List[str] = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : Optional[Any] = (image_size // patch_size) ** 2
_a : Union[str, Any] = num_patches + 1
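        # Worked example with this tester's defaults: image_size=30, patch_size=2
        # -> (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 1 = 226.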
def __lowercase ( self ) -> Any:
_a : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : List[Any] = None
if self.use_labels:
_a : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Dict = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> List[Any]:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def __lowercase ( self , _a , _a , _a ) -> Any:
_a : Optional[int] = ViTModel(config=_a )
model.to(_a )
model.eval()
_a : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self , _a , _a , _a ) -> Dict:
_a : List[Any] = ViTForMaskedImageModeling(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_a : Tuple = 1
_a : List[str] = ViTForMaskedImageModeling(_a )
model.to(_a )
model.eval()
_a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Tuple = model(_a )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def __lowercase ( self , _a , _a , _a ) -> List[str]:
_a : List[str] = self.type_sequence_label_size
_a : int = ViTForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : List[Any] = 1
_a : List[Any] = ViTForImageClassification(_a )
model.to(_a )
model.eval()
_a : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self ) -> int:
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
UpperCAmelCase__ : List[str] = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : str = True
UpperCAmelCase__ : List[str] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Union[str, Any] = False
def __lowercase ( self ) -> str:
_a : Dict = ViTModelTester(self )
_a : Optional[int] = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=3_7 )
def __lowercase ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def __lowercase ( self ) -> Tuple:
_a , _a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __lowercase ( self ) -> Any:
_a , _a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Tuple = model_class(_a )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : List[str] = [*signature.parameters.keys()]
_a : List[Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> Optional[Any]:
_a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> int:
_a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> Union[str, Any]:
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = ViTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> Union[str, Any]:
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> Tuple:
_a : str = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(_a )
_a : Optional[int] = self.default_image_processor
_a : List[str] = prepare_img()
_a : str = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : int = model(**_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : Union[str, Any] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
@slow
def __lowercase ( self ) -> int:
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
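        # e.g. at 480x480 with the DINO-S/8 patch size: (480 // 8) ** 2 + 1 = 3601
        # positions, matching the sequence length asserted on the output below.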
_a : Any = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(_a )
_a : Dict = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=4_8_0 )
_a : Optional[Any] = prepare_img()
_a : int = image_processor(images=_a , return_tensors='''pt''' )
_a : List[Any] = inputs.pixel_values.to(_a )
# forward pass
with torch.no_grad():
_a : List[Any] = model(_a , interpolate_pos_encoding=_a )
# verify the logits
_a : Union[str, Any] = torch.Size((1, 3_6_0_1, 3_8_4) )
self.assertEqual(outputs.last_hidden_state.shape , _a )
_a : Union[str, Any] = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(_a )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _a , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def __lowercase ( self ) -> Union[str, Any]:
_a : str = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' )
_a : Tuple = self.default_image_processor
_a : int = prepare_img()
_a : str = image_processor(images=_a , return_tensors='''pt''' )
_a : Dict = inputs.pixel_values.to(_a )
# forward pass to make sure inference works in fp16
with torch.no_grad():
_a : Any = model(_a )
| 14 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , _a , _a=1_3 , _a=3 , _a=True , _a=True , _a=0.1 , _a=0.1 , _a=2_2_4 , _a=1_0_0_0 , _a=[3, 3, 6, 4] , _a=[4_8, 5_6, 1_1_2, 2_2_0] , ) -> Tuple:
_a : Dict = parent
_a : Optional[int] = batch_size
_a : Optional[Any] = num_channels
_a : Union[str, Any] = is_training
_a : Tuple = use_labels
_a : Dict = hidden_dropout_prob
_a : List[Any] = attention_probs_dropout_prob
_a : Dict = num_labels
_a : List[str] = image_size
_a : Dict = layer_depths
_a : str = embed_dims
def __lowercase ( self ) -> Optional[Any]:
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : int = None
if self.use_labels:
_a : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
_a : Dict = self.get_config()
return config, pixel_values, labels
def __lowercase ( self ) -> int:
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act='''gelu''' , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=_a , layer_scale_init_value=1e-5 , )
def __lowercase ( self , _a , _a , _a ) -> str:
_a : List[Any] = SwiftFormerModel(config=_a )
model.to(_a )
model.eval()
_a : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def __lowercase ( self , _a , _a , _a ) -> Optional[Any]:
_a : List[str] = self.num_labels
_a : Optional[int] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : List[str] = model(_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
_a : Union[str, Any] = SwiftFormerForImageClassification(_a )
model.to(_a )
model.eval()
_a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Optional[Any] = model(_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowercase ( self ) -> Tuple:
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __lowercase , __lowercase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[int] = (
{"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : Tuple = False
UpperCAmelCase__ : str = False
def __lowercase ( self ) -> Optional[int]:
_a : Union[str, Any] = SwiftFormerModelTester(self )
_a : int = ConfigTester(
self , config_class=_a , has_text_modality=_a , hidden_size=3_7 , num_attention_heads=1_2 , num_hidden_layers=1_2 , )
def __lowercase ( self ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''SwiftFormer does not use inputs_embeds''' )
def __lowercase ( self ) -> Union[str, Any]:
pass
def __lowercase ( self ) -> Dict:
_a , _a : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Any = model_class(_a )
_a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a , nn.Linear ) )
def __lowercase ( self ) -> str:
_a , _a : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Optional[int] = model_class(_a )
_a : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Tuple = [*signature.parameters.keys()]
_a : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _a )
def __lowercase ( self ) -> int:
_a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def __lowercase ( self ) -> Optional[int]:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_a )
@slow
def __lowercase ( self ) -> Optional[Any]:
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = SwiftFormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@unittest.skip(reason='''SwiftFormer does not output attentions''' )
def __lowercase ( self ) -> List[Any]:
pass
def __lowercase ( self ) -> int:
def check_hidden_states_output(_a , _a , _a ):
_a : Optional[int] = model_class(_a )
model.to(_a )
model.eval()
with torch.no_grad():
_a : Union[str, Any] = model(**self._prepare_for_class(_a , _a ) )
_a : Optional[Any] = outputs.hidden_states
_a : Union[str, Any] = 8
self.assertEqual(len(_a ) , _a ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
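            # e.g. with image_size=224: 224 // 4 = 56, halved after every 2 blocks,
            # giving spatial sizes 56, 56, 28, 28, 14, 14, 7, 7 over the 8 hidden states.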
for i in range(len(_a ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
_a , _a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : str = True
check_hidden_states_output(_a , _a , _a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : List[str] = True
check_hidden_states_output(_a , _a , _a )
def __lowercase ( self ) -> str:
def _config_zero_init(_a ):
_a : List[Any] = copy.deepcopy(_a )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(_a , _a , 1e-1_0 )
if isinstance(getattr(_a , _a , _a ) , _a ):
_a : int = _config_zero_init(getattr(_a , _a ) )
setattr(_a , _a , _a )
return configs_no_init
_a , _a : Any = self.model_tester.prepare_config_and_inputs_for_common()
_a : Dict = _config_zero_init(_a )
for model_class in self.all_model_classes:
_a : Dict = model_class(config=_a )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def __lowercase ( self ) -> Optional[Any]:
pass
def __UpperCAmelCase ( ) -> Optional[Any]:
"""simple docstring"""
_a : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def __lowercase ( self ) -> str:
return ViTImageProcessor.from_pretrained('''MBZUAI/swiftformer-xs''' ) if is_vision_available() else None
@slow
def __lowercase ( self ) -> Dict:
_a : Any = SwiftFormerForImageClassification.from_pretrained('''MBZUAI/swiftformer-xs''' ).to(_a )
_a : Any = self.default_image_processor
_a : Any = prepare_img()
_a : Any = image_processor(images=_a , return_tensors='''pt''' ).to(_a )
# forward pass
with torch.no_grad():
_a : Optional[Any] = model(**_a )
# verify the logits
_a : List[str] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _a )
_a : int = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(_a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
| 14 | 1 |
from __future__ import annotations
def merge(input_list: list ,low: int ,mid: int ,high: int ) -> list:
    """simple docstring"""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0 ) )
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list ) -> list:
    """simple docstring"""
    if len(input_list ) <= 1:
        return input_list
    input_list = list(input_list )
    # iteration for two-way merging
    p = 2
    while p <= len(input_list ):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0 ,len(input_list ) ,p ):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list ,low ,mid ,high )
        # final merge of last two parts
        if p * 2 >= len(input_list ):
            mid = i
            input_list = merge(input_list ,0 ,mid ,len(input_list ) - 1 )
            break
        p *= 2
    return input_list
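# Worked example of the bottom-up passes (p doubles each round):
#
#   iter_merge_sort([5, 3, 1, 4, 2])
#   p=2: merge adjacent pairs -> [3, 5, 1, 4, 2]  (sorted runs [3,5] [1,4] [2])
#   p=4: merge runs of four   -> [1, 3, 4, 5, 2]
#   final merge of the last two parts -> [1, 2, 3, 4, 5]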
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(''',''')]
    print(iter_merge_sort(unsorted))
| 14 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
a__ = logging.get_logger(__name__)
def __UpperCAmelCase ( __a : str ) -> List[Any]:
"""simple docstring"""
_a : Tuple = SwinConfig.from_pretrained(
'''microsoft/swin-tiny-patch4-window7-224''' ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
_a : Dict = MaskFormerConfig(backbone_config=__a )
_a : Optional[Any] = '''huggingface/label-files'''
if "ade20k-full" in model_name:
# this should be ok
_a : Optional[Any] = 847
_a : List[Any] = '''maskformer-ade20k-full-id2label.json'''
elif "ade" in model_name:
# this should be ok
_a : Union[str, Any] = 150
_a : Any = '''ade20k-id2label.json'''
elif "coco-stuff" in model_name:
# this should be ok
_a : int = 171
_a : List[str] = '''maskformer-coco-stuff-id2label.json'''
elif "coco" in model_name:
# TODO
_a : Dict = 133
_a : Optional[Any] = '''coco-panoptic-id2label.json'''
elif "cityscapes" in model_name:
# this should be ok
_a : List[Any] = 19
_a : Optional[Any] = '''cityscapes-id2label.json'''
elif "vistas" in model_name:
# this should be ok
_a : List[Any] = 65
_a : Dict = '''mapillary-vistas-id2label.json'''
_a : Optional[int] = json.load(open(hf_hub_download(__a ,__a ,repo_type='''dataset''' ) ,'''r''' ) )
_a : Tuple = {int(__a ): v for k, v in idalabel.items()}
return config
def __UpperCAmelCase ( __a : Optional[Any] ) -> Tuple:
"""simple docstring"""
_a : Optional[Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 ,0 ,-1 ) ,range(0 ,3 ) ):
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") )
rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def __UpperCAmelCase ( __a : List[str] ,__a : List[Any] ,__a : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_a : str = dct.pop(__a )
_a : str = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    """Split the fused Swin qkv projections into separate query/key/value tensors
    (target key names follow the HF MaskFormer module layout)."""
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
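# Standalone illustration (not executed by the conversion): a fused qkv weight of
# shape (3 * dim, dim) splits row-wise into equal query/key/value blocks.
#
#     import torch
#     dim = 2
#     qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
#     q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#     assert torch.equal(torch.cat([q, k, v]), qkv)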
def read_in_decoder_q_k_v(state_dict, config):
    """Split the fused decoder attention projections into query/key/value tensors
    (target key names follow the HF MaskFormer module layout)."""
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img():
    """Download the standard COCO image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str, push_to_hub: bool = False):
    """Copy/paste/tweak the original MaskFormer checkpoint weights into the HF MaskFormer structure."""
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results on an image
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = "ade" in model_name
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")
    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--checkpoint_path",
        default="/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl",
        type=str,
        help="Path to the original state dict (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_maskformer_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
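# Example invocation (the script filename and paths are illustrative):
#
#     python convert_maskformer_original_pytorch_checkpoint_to_pytorch.py \
#         --model_name maskformer-swin-tiny-ade \
#         --checkpoint_path /path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl \
#         --pytorch_dump_folder_path /path/to/output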
| 14 | 1 |
def sum_of_digits(n: int) -> int:
    """
    Find the sum of the digits of n, iteratively.

    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(-123)
    6
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """
    Find the sum of the digits of n, recursively.

    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """
    Find the sum of the digits of n via its string representation.

    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations against one another."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
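# Worked example: for n = 9875 the iterative version accumulates 5 + 7 + 8 + 9 = 29;
# the recursive and compact variants return the same value.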
| 14 |
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLMProphetNetTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "[PAD]"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "[PAD]")
        self.assertEqual(vocab_keys[1], "[CLS]")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1012)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1012)

    def test_full_tokenizer(self):
        tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
            ],
        )

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
            ],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''[UNK]''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''[UNK]''',
'''.''',
            ],
        )
    @cached_property
    def big_tokenizer(self):
        return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [35389, 6672, 49, 2]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
# fmt: off
_a : str = {'''input_ids''': [[1_1_0_7_3, 8_2_7_8_3, 1_8, 2_6, 8_2_7_8_3, 5_4_9, 5_1_5_4_0, 2_4_8, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 2_1_5_1_8_6, 1_3_2_5, 1_4_7, 1_7_2_0_9, 1_3_0_1, 2_1_7, 2_0, 5_6_3_7_0, 5_3, 1_2_2_0_2_0, 2_0, 1_6_4_7_7, 2_7, 8_7_3_5_5, 4_5_4_8, 2_0, 4_7_2_8, 7_8_3_9_2, 1_7, 1_5_9_9_6_9, 1_8, 2_6, 2_4_4_9_1, 6_2_9, 1_5, 5_3_8, 2_2_7_0_4, 5_4_3_9, 1_5, 2_7_8_8, 2_4_4_9_1, 9_8_8_5, 1_5, 4_3_5_3_4, 6_0_5, 1_5, 8_1_4, 1_8_4_0_3, 3_3_2_0_0, 2_9, 1_5, 4_3_5_3_4, 2_4_4_5_8, 1_2_4_1_0, 1_1_1, 2_4_9_6_6, 8_3_6_6_9, 9_6_3_7, 1_4_4_0_6_8, 2_6, 8_5_0, 2_2_3_4_6, 2_7, 1_4_7, 2_4_9_6_6, 8_3_6_6_9, 8_3_4_9_0, 2_6, 3_9_1_1_3, 7_3_5, 2_7, 6_8_9, 6_5_6, 2_8_0_0, 1_3_3_9, 4_6_0_0, 5_3, 1_2_2_0_2_0, 1_1_5_7_8_5, 3_4, 8_1_6, 1_3_3_9, 4_6_8_8_7, 1_8, 1_4_7, 5_3_9_0_5, 1_9_5_1, 4_2_2_3_8, 4_1_1_7_0, 1_7_7_3_2, 8_3_4, 4_3_6, 1_5, 2_7_5_2_3, 9_8_7_3_3, 2_1_7, 1_4_7, 5_5_4_2, 4_9_8_1, 9_3_0, 1_7_3_4_7, 1_6, 2], [2_0_0_9_1, 6_2_9, 9_4, 8_2_7_8_6, 5_8, 4_9_0, 2_0, 1_5_2_8, 8_4, 5_3_9_0_5, 3_4_4, 8_0_5_9_2, 1_1_0_1_2_8, 1_8_8_2_2, 5_2_6_7, 1_3_0_6, 6_2, 1_5_2_5_3_7, 3_0_8, 7_9_9_7, 4_0_1, 1_2_4_4_2_7, 5_4_9, 3_5_4_4_2, 2_2_5, 1_0_9, 1_5_0_5_5, 2_5_7_4_8, 1_4_7, 7_1_1_9, 4_3_7_1_2, 3_4, 7_6_7, 1_3_5_3_6_6, 1_8, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_9_2, 6_3_7_8_4, 1_1_9_4_6_6, 1_7, 1_4_7_8_0_8, 8_8_2_1_4, 1_8, 6_5_6, 8_1, 3_2, 3_2_9_6, 1_0_2_8_0, 1_6, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_a,
            model_name="microsoft/xprophetnet-large-wiki100-cased",
            revision="1acad1643ddd54a44df6a1b797ada8373685d90e",
        )
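# Note (assumed workflow, following the usual transformers test conventions; the
# test path is illustrative): the @slow tests above download the
# microsoft/xprophetnet-large-wiki100-cased checkpoint and only run when slow
# tests are enabled, e.g.:
#
#     RUN_SLOW=1 pytest tests/models/xlm_prophetnet/test_tokenization_xlm_prophetnet.py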
| 14 | 1 |
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(42)
BERT_BASE_CASED = "bert-base-cased"
FP16 = "fp16"
BF16 = "bf16"
dtypes = [FP16, BF16]
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))

    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)

    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
                if policy == "NO_WRAP":
                    self.assertIsNone(fsdp_plugin.auto_wrap_policy)
                else:
                    self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)

    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
            AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
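# Sketch of the environment contract exercised above (only the mappings these
# tests assert, not an exhaustive list):
#
#     FSDP_SHARDING_STRATEGY="1"             -> sharding_strategy == ShardingStrategy(1)
#     FSDP_BACKWARD_PREFETCH="BACKWARD_PRE"  -> backward_prefetch == BackwardPrefetch.BACKWARD_PRE
#     FSDP_OFFLOAD_PARAMS="true"             -> cpu_offload == CPUOffload(offload_params=True)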
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                    break

            if "fp32" in config:
                cmd_config.append("--mixed_precision=no")
            else:
                cmd_config.append("--mixed_precision=fp16")

            if "cpu_offload" in config:
                cmd_config.append("--fsdp_offload_params=True")

            for policy in FSDP_AUTO_WRAP_POLICY:
                if policy.lower() in config:
                    cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                    break

            if policy == "TRANSFORMER_BASED_WRAP":
                cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
            elif policy == "SIZE_BASED_WRAP":
                cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--performance_lower_bound={self.performance_lower_bound}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ]
                )
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
            if "fp16" in spec:
                cmd_config.extend(["--mixed_precision=fp16"])
            else:
                cmd_config.extend(["--mixed_precision=no"])

            if "multi_gpu" in spec:
                continue
            else:
                cmd_config.extend(["--use_fsdp"])
                for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                    if strategy.lower() in spec:
                        cmd_config.append(f"--fsdp_sharding_strategy={i + 1}")
                        break

                if "cpu_offload" in spec:
                    cmd_config.append("--fsdp_offload_params=True")

                for policy in FSDP_AUTO_WRAP_POLICY:
                    if policy.lower() in spec:
                        cmd_config.append(f"--fsdp_auto_wrap_policy={policy}")
                        break

                if policy == "TRANSFORMER_BASED_WRAP":
                    cmd_config.append("--fsdp_transformer_layer_cls_to_wrap=BertLayer")
                elif policy == "SIZE_BASED_WRAP":
                    cmd_config.append("--fsdp_min_num_params=2000")

            cmd_config.extend(
                [
                    self.test_file_path,
                    f"--output_dir={self.tmpdir}",
                    f"--peak_memory_upper_bound={peak_mem_upper_bound}",
                    f"--n_train={self.n_train}",
                    f"--n_val={self.n_val}",
                ]
            )
            with patch_environment(omp_num_threads=1):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
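# Note (assumed workflow; the test path is illustrative): these integration tests
# shell out to `accelerate launch` with two processes, so they need at least two
# GPUs and are gated behind @slow, e.g.:
#
#     RUN_SLOW=1 pytest tests/fsdp/test_fsdp.py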
| 14 |
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LxmertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LxmertTokenizer
    rust_tokenizer_class = LxmertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
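# Minimal usage sketch (the checkpoint name is an assumption, not used by the
# tests above):
#
#     from transformers import LxmertTokenizer
#     tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
#     tokens = tokenizer.tokenize("unwanted running")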
| 14 | 1 |