code
stringlengths 82
54.1k
| code_codestyle
int64 0
699
| style_context
stringlengths 111
35.6k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __lowerCamelCase ( UpperCamelCase__ ):
"""simple docstring"""
snake_case__ = (DEISMultistepScheduler,)
snake_case__ = (("num_inference_steps", 2_5),)
def a ( self : str , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Any:
lowerCAmelCase__ = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"solver_order": 2,
}
config.update(**SCREAMING_SNAKE_CASE__ )
return config
def a ( self : str , SCREAMING_SNAKE_CASE__ : Optional[int]=0 , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
lowerCAmelCase__ = dict(self.forward_default_kwargs )
lowerCAmelCase__ = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.dummy_sample
lowerCAmelCase__ = 0.1 * sample
lowerCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
lowerCAmelCase__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
lowerCAmelCase__ = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase__ , lowerCAmelCase__ = sample, sample
for t in range(SCREAMING_SNAKE_CASE__ , time_step + scheduler.config.solver_order + 1 ):
lowerCAmelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowerCAmelCase__ = new_scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a ( self : Any ) -> int:
pass
def a ( self : Tuple , SCREAMING_SNAKE_CASE__ : str=0 , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Optional[Any]:
lowerCAmelCase__ = dict(self.forward_default_kwargs )
lowerCAmelCase__ = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.dummy_sample
lowerCAmelCase__ = 0.1 * sample
lowerCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals (must be after setting timesteps)
lowerCAmelCase__ = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = scheduler_class.from_pretrained(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
# copy over dummy past residual (must be after setting timesteps)
lowerCAmelCase__ = dummy_past_residuals[: new_scheduler.config.solver_order]
lowerCAmelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowerCAmelCase__ = new_scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def a ( self : Optional[int] , SCREAMING_SNAKE_CASE__ : int=None , **SCREAMING_SNAKE_CASE__ : str ) -> Tuple:
if scheduler is None:
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = 10
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
return sample
def a ( self : Union[str, Any] ) -> Union[str, Any]:
lowerCAmelCase__ = dict(self.forward_default_kwargs )
lowerCAmelCase__ = kwargs.pop("num_inference_steps" , SCREAMING_SNAKE_CASE__ )
for scheduler_class in self.scheduler_classes:
lowerCAmelCase__ = self.get_scheduler_config()
lowerCAmelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = self.dummy_sample
lowerCAmelCase__ = 0.1 * sample
if num_inference_steps is not None and hasattr(SCREAMING_SNAKE_CASE__ , "set_timesteps" ):
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
elif num_inference_steps is not None and not hasattr(SCREAMING_SNAKE_CASE__ , "set_timesteps" ):
lowerCAmelCase__ = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowerCAmelCase__ = [residual + 0.2, residual + 0.15, residual + 0.10]
lowerCAmelCase__ = dummy_past_residuals[: scheduler.config.solver_order]
lowerCAmelCase__ = scheduler.timesteps[5]
lowerCAmelCase__ = scheduler.timesteps[6]
lowerCAmelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
lowerCAmelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def a ( self : Any ) -> Optional[Any]:
# make sure that iterating over schedulers with same config names gives same results
# for defaults
lowerCAmelCase__ = DEISMultistepScheduler(**self.get_scheduler_config() )
lowerCAmelCase__ = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
lowerCAmelCase__ = DPMSolverSinglestepScheduler.from_config(scheduler.config )
lowerCAmelCase__ = DPMSolverMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase__ = UniPCMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase__ = DEISMultistepScheduler.from_config(scheduler.config )
lowerCAmelCase__ = self.full_loop(scheduler=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def a ( self : Union[str, Any] ) -> str:
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple ) -> List[str]:
self.check_over_configs(thresholding=SCREAMING_SNAKE_CASE__ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , sample_max_value=SCREAMING_SNAKE_CASE__ , algorithm_type="deis" , solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , )
def a ( self : List[Any] ) -> Any:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=SCREAMING_SNAKE_CASE__ )
def a ( self : int ) -> Tuple:
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , algorithm_type=SCREAMING_SNAKE_CASE__ , )
lowerCAmelCase__ = self.full_loop(
solver_order=SCREAMING_SNAKE_CASE__ , solver_type=SCREAMING_SNAKE_CASE__ , prediction_type=SCREAMING_SNAKE_CASE__ , algorithm_type=SCREAMING_SNAKE_CASE__ , )
assert not torch.isnan(SCREAMING_SNAKE_CASE__ ).any(), "Samples have nan numbers"
def a ( self : int ) -> Tuple:
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
self.check_over_configs(lower_order_final=SCREAMING_SNAKE_CASE__ )
def a ( self : Tuple ) -> List[str]:
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=SCREAMING_SNAKE_CASE__ , time_step=0 )
def a ( self : Optional[int] ) -> int:
lowerCAmelCase__ = self.full_loop()
lowerCAmelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.23_916 ) < 1e-3
def a ( self : Tuple ) -> Dict:
lowerCAmelCase__ = self.full_loop(prediction_type="v_prediction" )
lowerCAmelCase__ = torch.mean(torch.abs(SCREAMING_SNAKE_CASE__ ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def a ( self : Optional[int] ) -> List[str]:
lowerCAmelCase__ = self.scheduler_classes[0]
lowerCAmelCase__ = self.get_scheduler_config(thresholding=SCREAMING_SNAKE_CASE__ , dynamic_thresholding_ratio=0 )
lowerCAmelCase__ = scheduler_class(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = 10
lowerCAmelCase__ = self.dummy_model()
lowerCAmelCase__ = self.dummy_sample_deter.half()
scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ )
for i, t in enumerate(scheduler.timesteps ):
lowerCAmelCase__ = model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = scheduler.step(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).prev_sample
assert sample.dtype == torch.floataa
| 61 |
'''simple docstring'''
def A ( UpperCamelCase_ : str , UpperCamelCase_ : int ) -> list:
'''simple docstring'''
lowerCAmelCase__ = word.split()
def justify(UpperCamelCase_ : list , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> str:
lowerCAmelCase__ = max_width - width
lowerCAmelCase__ = len(UpperCamelCase_ )
if len(UpperCamelCase_ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowerCAmelCase__ = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCAmelCase__ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCAmelCase__ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(UpperCamelCase_ ):
num_spaces_between_words_list[i] += 1
lowerCAmelCase__ = []
for i in range(UpperCamelCase_ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
for word in words:
if width + len(UpperCamelCase_ ) + len(UpperCamelCase_ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(UpperCamelCase_ )
width += len(UpperCamelCase_ )
else:
# justify the line and add it to result
answer.append(justify(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) )
# reset new line and new width
lowerCAmelCase__ ,lowerCAmelCase__ = [word], len(UpperCamelCase_ )
lowerCAmelCase__ = max_width - width - len(UpperCamelCase_ )
answer.append(" ".join(UpperCamelCase_ ) + (remaining_spaces + 1) * " " )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 48 | 0 |
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE :
'''simple docstring'''
def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : float = 0 ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = row, column
SCREAMING_SNAKE_CASE : Optional[Any] = [[default_value for c in range(UpperCAmelCase_ )] for r in range(UpperCAmelCase_ )]
def __str__( self : Optional[int] ):
SCREAMING_SNAKE_CASE : Dict = f'''Matrix consist of {self.row} rows and {self.column} columns\n'''
# Make string identifier
SCREAMING_SNAKE_CASE : Dict = 0
for row_vector in self.array:
for obj in row_vector:
SCREAMING_SNAKE_CASE : Optional[Any] = max(UpperCAmelCase_ , len(str(UpperCAmelCase_ ) ) )
SCREAMING_SNAKE_CASE : Optional[int] = f'''%{max_element_length}s'''
# Make string and return
def single_line(UpperCAmelCase_ : list[float] ) -> str:
nonlocal string_format_identifier
SCREAMING_SNAKE_CASE : Optional[int] = "["
line += ", ".join(string_format_identifier % (obj,) for obj in row_vector )
line += "]"
return line
s += "\n".join(single_line(UpperCAmelCase_ ) for row_vector in self.array )
return s
def __repr__( self : Dict ):
return str(self )
def _A ( self : Optional[int] , UpperCAmelCase_ : tuple[int, int] ):
if not (isinstance(UpperCAmelCase_ , (list, tuple) ) and len(UpperCAmelCase_ ) == 2):
return False
elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
return False
else:
return True
def __getitem__( self : Any , UpperCAmelCase_ : tuple[int, int] ):
assert self.validate_indicies(UpperCAmelCase_ )
return self.array[loc[0]][loc[1]]
def __setitem__( self : int , UpperCAmelCase_ : tuple[int, int] , UpperCAmelCase_ : float ):
assert self.validate_indicies(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = value
def __add__( self : Optional[int] , UpperCAmelCase_ : Matrix ):
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
assert self.row == another.row and self.column == another.column
# Add
SCREAMING_SNAKE_CASE : str = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE : Any = self[r, c] + another[r, c]
return result
def __neg__( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE : Union[str, Any] = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE : str = -self[r, c]
return result
def __sub__( self : Optional[Any] , UpperCAmelCase_ : Matrix ):
return self + (-another)
def __mul__( self : Dict , UpperCAmelCase_ : int | float | Matrix ):
if isinstance(UpperCAmelCase_ , (int, float) ): # Scalar multiplication
SCREAMING_SNAKE_CASE : Any = Matrix(self.row , self.column )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE : str = self[r, c] * another
return result
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): # Matrix multiplication
assert self.column == another.row
SCREAMING_SNAKE_CASE : Any = Matrix(self.row , another.column )
for r in range(self.row ):
for c in range(another.column ):
for i in range(self.column ):
result[r, c] += self[r, i] * another[i, c]
return result
else:
SCREAMING_SNAKE_CASE : List[str] = f'''Unsupported type given for another ({type(UpperCAmelCase_ )})'''
raise TypeError(UpperCAmelCase_ )
def _A ( self : int ):
SCREAMING_SNAKE_CASE : List[str] = Matrix(self.column , self.row )
for r in range(self.row ):
for c in range(self.column ):
SCREAMING_SNAKE_CASE : List[str] = self[r, c]
return result
def _A ( self : Union[str, Any] , UpperCAmelCase_ : Matrix , UpperCAmelCase_ : Matrix ):
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and isinstance(UpperCAmelCase_ , UpperCAmelCase_ )
assert self.row == self.column == u.row == v.row # u, v should be column vector
assert u.column == v.column == 1 # u, v should be column vector
# Calculate
SCREAMING_SNAKE_CASE : Tuple = v.transpose()
SCREAMING_SNAKE_CASE : int = (v_t * self * u)[0, 0] + 1
if numerator_factor == 0:
return None # It's not invertable
return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
def lowerCamelCase__ ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = Matrix(3 , 3 , 0 )
for i in range(3 ):
SCREAMING_SNAKE_CASE : str = 1
print(F'''a^(-1) is {ainv}''' )
# u, v
SCREAMING_SNAKE_CASE : Optional[int] = Matrix(3 , 1 , 0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = 1, 2, -3
SCREAMING_SNAKE_CASE : Tuple = Matrix(3 , 1 , 0 )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = 4, -2, 5
print(F'''u is {u}''' )
print(F'''v is {v}''' )
print(F'''uv^T is {u * v.transpose()}''' )
# Sherman Morrison
print(F'''(a + uv^T)^(-1) is {ainv.sherman_morrison(lowercase , lowercase )}''' )
def lowerCamelCase__ ( ):
"""simple docstring"""
import doctest
doctest.testmod()
testa()
| 62 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase__ : str = sys.version_info >= (3, 10)
def A ( UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None ) -> Optional[int]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=UpperCamelCase_ )
@dataclass
class A :
snake_case__ :int
snake_case__ :float
snake_case__ :str
snake_case__ :bool
@dataclass
class A :
snake_case__ :int = 42
snake_case__ :str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :Optional[bool] = None
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'titi'
snake_case__ :Optional[int] = 'toto'
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'titi'
snake_case__ :str = 'toto'
snake_case__ :int = 42
@dataclass
class A :
snake_case__ :BasicEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.foo )
@dataclass
class A :
snake_case__ :MixedTypeEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = MixedTypeEnum(self.foo )
@dataclass
class A :
snake_case__ :Optional[int] = None
snake_case__ :Optional[float] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :Optional[str] = None
snake_case__ :Optional[List[str]] = list_field(default=[] )
snake_case__ :Optional[List[int]] = list_field(default=[] )
@dataclass
class A :
snake_case__ :List[int] = list_field(default=[] )
snake_case__ :List[int] = list_field(default=[1, 2, 3] )
snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A :
snake_case__ :List[int] = field()
snake_case__ :str = field()
snake_case__ :BasicEnum = field()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.required_enum )
@dataclass
class A :
snake_case__ :int
snake_case__ :"BasicEnum" = field()
snake_case__ :"Optional[bool]" = None
snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} )
snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :bool | None = None
@dataclass
class A :
snake_case__ :int | None = None
snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :str | None = None
snake_case__ :list[str] | None = list_field(default=[] )
snake_case__ :list[int] | None = list_field(default=[] )
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) )
del xx["type"], yy["type"]
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ )
self.assertFalse(example.flag )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
lowerCAmelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
@dataclass
class A :
snake_case__ :Literal["titi", "toto", 42] = "toto"
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(
__magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCAmelCase__ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" )
expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ )
lowerCAmelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) )
lowerCAmelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowerCAmelCase__ = parser.parse_dict(__magic_name__ )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_json" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_yaml" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
| 48 | 0 |
def lowerCamelCase__ ( __lowerCamelCase : float , __lowerCamelCase : int ):
if digit_amount > 0:
return round(number - int(__lowerCamelCase ) , __lowerCamelCase )
return number - int(__lowerCamelCase )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3))
| 63 |
'''simple docstring'''
import sys
from collections import defaultdict
class A :
def __init__( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = []
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[Any] ):
"""simple docstring"""
return self.node_position[vertex]
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = pos
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] ):
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
lowerCAmelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
lowerCAmelCase__ = 2 * start + 1
else:
lowerCAmelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
lowerCAmelCase__ ,lowerCAmelCase__ = heap[smallest_child], positions[smallest_child]
lowerCAmelCase__ ,lowerCAmelCase__ = (
heap[start],
positions[start],
)
lowerCAmelCase__ ,lowerCAmelCase__ = temp, tempa
lowerCAmelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __magic_name__ )
self.top_to_bottom(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = position[index]
while index != 0:
lowerCAmelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
lowerCAmelCase__ = heap[parent]
lowerCAmelCase__ = position[parent]
self.set_position(position[parent] , __magic_name__ )
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , __magic_name__ )
break
lowerCAmelCase__ = parent
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , 0 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
lowerCAmelCase__ = len(__magic_name__ ) // 2 - 1
for i in range(__magic_name__ , -1 , -1 ):
self.top_to_bottom(__magic_name__ , __magic_name__ , len(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = positions[0]
lowerCAmelCase__ = sys.maxsize
self.top_to_bottom(__magic_name__ , 0 , len(__magic_name__ ) , __magic_name__ )
return temp
def A ( UpperCamelCase_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = Heap()
lowerCAmelCase__ = [0] * len(UpperCamelCase_ )
lowerCAmelCase__ = [-1] * len(UpperCamelCase_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
lowerCAmelCase__ = [] # Heap of Distance of vertices from their neighboring vertex
lowerCAmelCase__ = []
for vertex in range(len(UpperCamelCase_ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase_ )
heap.node_position.append(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = 1
lowerCAmelCase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
lowerCAmelCase__ = 0
lowerCAmelCase__ = distance
heap.heapify(UpperCamelCase_ , UpperCamelCase_ )
for _ in range(1 , len(UpperCamelCase_ ) ):
lowerCAmelCase__ = heap.delete_minimum(UpperCamelCase_ , UpperCamelCase_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
lowerCAmelCase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase_ )]
):
lowerCAmelCase__ = distance
heap.bottom_to_top(
UpperCamelCase_ , heap.get_position(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase__ : Optional[int] = int(input("Enter number of edges: ").strip())
UpperCAmelCase__ : str = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase__ : int = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 48 | 0 |
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowercase_ : int = logging.get_logger(__name__) # pylint: disable=invalid-name
class _lowerCamelCase ( UpperCamelCase_ ):
def __init__( self , lowerCAmelCase , lowerCAmelCase=768 ) -> Union[str, Any]:
super().__init__(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= proj_size
SCREAMING_SNAKE_CASE__: Tuple= CLIPVisionModel(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= PaintByExampleMapper(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Tuple= nn.LayerNorm(config.hidden_size )
SCREAMING_SNAKE_CASE__: Union[str, Any]= nn.Linear(config.hidden_size , self.proj_size )
# uncondition for scaling
SCREAMING_SNAKE_CASE__: Optional[Any]= nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase=False ) -> Dict:
SCREAMING_SNAKE_CASE__: str= self.model(pixel_values=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= clip_output.pooler_output
SCREAMING_SNAKE_CASE__: List[Any]= self.mapper(latent_states[:, None] )
SCREAMING_SNAKE_CASE__: str= self.final_layer_norm(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= self.proj_out(lowerCAmelCase )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class _lowerCamelCase ( nn.Module ):
def __init__( self , lowerCAmelCase ) -> int:
super().__init__()
SCREAMING_SNAKE_CASE__: List[str]= (config.num_hidden_layers + 1) // 5
SCREAMING_SNAKE_CASE__: Tuple= config.hidden_size
SCREAMING_SNAKE_CASE__: List[Any]= 1
SCREAMING_SNAKE_CASE__: List[Any]= nn.ModuleList(
[
BasicTransformerBlock(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , activation_fn='''gelu''' , attention_bias=lowerCAmelCase )
for _ in range(lowerCAmelCase )
] )
def UpperCamelCase_ ( self , lowerCAmelCase ) -> Optional[Any]:
for block in self.blocks:
SCREAMING_SNAKE_CASE__: List[Any]= block(lowerCAmelCase )
return hidden_states
| 64 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
UpperCAmelCase__ : Tuple = 5
UpperCAmelCase__ : List[Any] = 10
@require_sentencepiece
@require_tokenizers
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Tuple = SpeechaTextTokenizer
snake_case__ :Dict = False
snake_case__ :Optional[int] = True
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
lowerCAmelCase__ = sp.SentencePieceProcessor()
spm_model.Load(__magic_name__ )
lowerCAmelCase__ = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )]
lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCAmelCase__ = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = "<pad>"
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__magic_name__ ) , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [289, 50, 14, 174, 386] , )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class A ( unittest.TestCase ):
snake_case__ :Union[str, Any] = 'valhalla/s2t_mustc_multilinguial_medium'
snake_case__ :Tuple = 'C\'est trop cool'
snake_case__ :List[str] = 'Esto es genial'
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertIn(__magic_name__ , self.tokenizer.all_special_ids )
lowerCAmelCase__ = [ES_CODE, 4, 1601, 47, 7647, 2]
lowerCAmelCase__ = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertNotIn(self.tokenizer.eos_token , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
lowerCAmelCase__ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __magic_name__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowerCAmelCase__ = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 48 | 0 |
"""simple docstring"""
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class __lowercase :
@property
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
return self.get_dummy_input()
@property
def __lowercase ( self : Dict ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'." )
def __lowercase ( self : List[str] ,A : List[str]=True ,A : Optional[int]=False ,A : Any=False ,A : int=False ,):
'''simple docstring'''
UpperCAmelCase__ : int = 4
UpperCAmelCase__ : Tuple = 32
UpperCAmelCase__ : Optional[Any] = (32, 32)
UpperCAmelCase__ : Dict = torch.manual_seed(0 )
UpperCAmelCase__ : int = torch.device(A )
UpperCAmelCase__ : List[str] = (batch_size, num_channels) + sizes
UpperCAmelCase__ : List[Any] = randn_tensor(A ,generator=A ,device=A )
UpperCAmelCase__ : List[Any] = {"""hidden_states""": hidden_states}
if include_temb:
UpperCAmelCase__ : List[Any] = 128
UpperCAmelCase__ : Any = randn_tensor((batch_size, temb_channels) ,generator=A ,device=A )
if include_res_hidden_states_tuple:
UpperCAmelCase__ : Union[str, Any] = torch.manual_seed(1 )
UpperCAmelCase__ : Tuple = (randn_tensor(A ,generator=A ,device=A ),)
if include_encoder_hidden_states:
UpperCAmelCase__ : Any = floats_tensor((batch_size, 32, 32) ).to(A )
if include_skip_sample:
UpperCAmelCase__ : Dict = randn_tensor(((batch_size, 3) + sizes) ,generator=A ,device=A )
return dummy_input
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
"""temb_channels""": 128,
}
if self.block_type == "up":
UpperCAmelCase__ : Union[str, Any] = 32
if self.block_type == "mid":
init_dict.pop("""out_channels""" )
UpperCAmelCase__ : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def __lowercase ( self : List[Any] ,A : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase__ : List[Any] = self.block_class(**A )
unet_block.to(A )
unet_block.eval()
with torch.no_grad():
UpperCAmelCase__ : Tuple = unet_block(**A )
if isinstance(A ,A ):
UpperCAmelCase__ : Optional[Any] = output[0]
self.assertEqual(output.shape ,self.output_shape )
UpperCAmelCase__ : Optional[Any] = output[0, -1, -3:, -3:]
UpperCAmelCase__ : Dict = torch.tensor(A ).to(A )
assert torch_all_close(output_slice.flatten() ,A ,atol=5e-3 )
@unittest.skipIf(torch_device == """mps""" ,"""Training is not supported in mps""" )
def __lowercase ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : int = self.prepare_init_args_and_inputs_for_common()
UpperCAmelCase__ : List[str] = self.block_class(**A )
model.to(A )
model.train()
UpperCAmelCase__ : List[Any] = model(**A )
if isinstance(A ,A ):
UpperCAmelCase__ : Tuple = output[0]
UpperCAmelCase__ : Any = torch.device(A )
UpperCAmelCase__ : Dict = randn_tensor(output.shape ,device=A )
UpperCAmelCase__ : str = torch.nn.functional.mse_loss(A ,A )
loss.backward()
| 65 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ : int = "RegNetConfig"
# Base docstring
UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[int] = [1, 10_88, 7, 7]
# Image classification docstring
UpperCAmelCase__ : Tuple = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[Any] = "tabby, tabby cat"
UpperCAmelCase__ : int = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
def __init__( self : str , __magic_name__ : int , __magic_name__ : int = 3 , __magic_name__ : int = 1 , __magic_name__ : int = 1 , __magic_name__ : Optional[str] = "relu" , **__magic_name__ : int , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowerCAmelCase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=__magic_name__ , strides=__magic_name__ , padding="VALID" , groups=__magic_name__ , use_bias=__magic_name__ , name="convolution" , )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
lowerCAmelCase__ = ACTaFN[activation] if activation is not None else tf.identity
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.convolution(self.padding(__magic_name__ ) )
lowerCAmelCase__ = self.normalization(__magic_name__ )
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , __magic_name__ : RegNetConfig , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config.num_channels
lowerCAmelCase__ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = shape_list(__magic_name__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 2, 3, 1) )
lowerCAmelCase__ = self.embedder(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Any , __magic_name__ : int , __magic_name__ : int = 2 , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=1 , strides=__magic_name__ , use_bias=__magic_name__ , name="convolution" )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : bool = False ):
"""simple docstring"""
return self.normalization(self.convolution(__magic_name__ ) , training=__magic_name__ )
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
lowerCAmelCase__ = [
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.pooler(__magic_name__ )
for layer_module in self.attention:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = hidden_state * pooled
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.2" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(__magic_name__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.3" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 , __magic_name__ : int = 2 , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowerCAmelCase__ = [
# downsampling is done in the first layer with stride of 2
layer(__magic_name__ , __magic_name__ , __magic_name__ , stride=__magic_name__ , name="layers.0" ),
*[layer(__magic_name__ , __magic_name__ , __magic_name__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] ):
"""simple docstring"""
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowerCAmelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__magic_name__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ , name=f"""stages.{i+1}""" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : tf.Tensor , __magic_name__ : bool = False , __magic_name__ : bool = True ):
"""simple docstring"""
lowerCAmelCase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
lowerCAmelCase__ = stage_module(__magic_name__ )
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__magic_name__ , hidden_states=__magic_name__ )
@keras_serializable
class A ( tf.keras.layers.Layer ):
snake_case__ :List[Any] = RegNetConfig
def __init__( self : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config
lowerCAmelCase__ = TFRegNetEmbeddings(__magic_name__ , name="embedder" )
lowerCAmelCase__ = TFRegNetEncoder(__magic_name__ , name="encoder" )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
@unpack_inputs
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.embedder(__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = self.encoder(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = encoder_outputs[0]
lowerCAmelCase__ = self.pooler(__magic_name__ )
# Change to NCHW output format have uniformity in the modules
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCAmelCase__ = tuple([tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :str = RegNetConfig
snake_case__ :Optional[Any] = 'regnet'
snake_case__ :Tuple = 'pixel_values'
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
UpperCAmelCase__ : List[str] = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ : Tuple = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Any , __magic_name__ : RegNetConfig , *__magic_name__ : Optional[int] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : int=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
pixel_values=__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , *__magic_name__ : Tuple , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = config.num_labels
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
# classification head
lowerCAmelCase__ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : tf.Tensor = None , __magic_name__ : tf.Tensor = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ = self.classifier[0](__magic_name__ )
lowerCAmelCase__ = self.classifier[1](__magic_name__ )
lowerCAmelCase__ = None if labels is None else self.hf_compute_loss(labels=__magic_name__ , logits=__magic_name__ )
if not return_dict:
lowerCAmelCase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states )
| 48 | 0 |
from ..utils import DummyObject, requires_backends
class lowerCAmelCase_ ( metaclass=__snake_case ):
_UpperCamelCase : Dict = ["torch", "scipy"]
def __init__( self , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(self , ['torch', 'scipy'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch', 'scipy'] )
@classmethod
def __a ( cls , *_lowerCAmelCase , **_lowerCAmelCase ):
requires_backends(cls , ['torch', 'scipy'] )
| 66 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def A ( UpperCamelCase_ : Tuple ) -> int:
'''simple docstring'''
for param in module.parameters():
lowerCAmelCase__ = False
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowerCAmelCase__ = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = plt.imshow(UpperCamelCase_ )
fig.axes.get_xaxis().set_visible(UpperCamelCase_ )
fig.axes.get_yaxis().set_visible(UpperCamelCase_ )
plt.show()
def A ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = datetime.now()
lowerCAmelCase__ = current_time.strftime("%H:%M:%S" )
return timestamp
| 48 | 0 |
import argparse
import os
import re
snake_case = """src/transformers/models/auto"""
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
snake_case = re.compile(R"""[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict""")
# re pattern that matches identifiers in mappings
snake_case = re.compile(R"""\s*\(\s*\"(\S[^\"]+)\"""")
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any , snake_case__ :bool = False ) -> int:
with open(snake_case__ , 'r' , encoding='utf-8' ) as f:
_lowercase = f.read()
_lowercase = content.split('\n' )
_lowercase = []
_lowercase = 0
while line_idx < len(snake_case__ ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
_lowercase = len(re.search(R'^(\s*)\S' , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(' ' * indent + '(' ):
new_lines.append(lines[line_idx] )
line_idx += 1
_lowercase = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
_lowercase = line_idx
while not lines[line_idx].startswith(' ' * indent + ')' ):
line_idx += 1
blocks.append('\n'.join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
_lowercase = sorted(snake_case__ , key=lambda snake_case__ : _re_identifier.search(snake_case__ ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write('\n'.join(snake_case__ ) )
elif "\n".join(snake_case__ ) != content:
return True
def SCREAMING_SNAKE_CASE__ ( snake_case__ :bool = False ) -> Tuple:
_lowercase = [os.path.join(snake_case__ , snake_case__ ) for f in os.listdir(snake_case__ ) if f.endswith('.py' )]
_lowercase = [sort_auto_mapping(snake_case__ , overwrite=snake_case__ ) for fname in fnames]
if not overwrite and any(snake_case__ ):
_lowercase = [f for f, d in zip(snake_case__ , snake_case__ ) if d]
raise ValueError(
F"""The following files have auto mappings that need sorting: {", ".join(snake_case__ )}. Run `make style` to fix"""
' this.' )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
snake_case = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 67 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase__ : List[Any] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 | 0 |
import re
from filelock import FileLock
try:
import nltk
__A = True
except (ImportError, ModuleNotFoundError):
__A = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def lowercase__ ( A_: str ) -> str:
"""simple docstring"""
re.sub("""<n>""" , """""" , A_ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(A_ ) )
| 68 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : int ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = BigBirdConfig.from_json_file(UpperCamelCase_ )
print(F"""Building PyTorch model from configuration: {config}""" )
if is_trivia_qa:
lowerCAmelCase__ = BigBirdForQuestionAnswering(UpperCamelCase_ )
else:
lowerCAmelCase__ = BigBirdForPreTraining(UpperCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(UpperCamelCase_ , UpperCamelCase_ , is_trivia_qa=UpperCamelCase_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
UpperCAmelCase__ : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 48 | 0 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 69 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class A :
def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=13 , __magic_name__ : List[str]=7 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : int=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Dict=1 , __magic_name__ : Tuple=32 , __magic_name__ : Any=4 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=512 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = block_sizes
lowerCAmelCase__ = num_decoder_layers
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_head
lowerCAmelCase__ = d_head
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = 2
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase__ = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase__ = self.num_hidden_layers + 2
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForPreTraining(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForMaskedLM(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForSequenceClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFFunnelForMultipleChoice(config=__magic_name__ )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForTokenClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,
) = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ :Any = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ :str = False
snake_case__ :Any = False
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case__ :int = False
snake_case__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self , base=__magic_name__ )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
| 48 | 0 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from tax import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
lowerCamelCase : str = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def _SCREAMING_SNAKE_CASE ( lowercase : str ):
'''simple docstring'''
lowerCamelCase_ = list(s_dict.keys() )
for key in keys:
lowerCamelCase_ = r'.*/layers_(\d+)'
lowerCamelCase_ = key
if re.match(lowercase , lowercase ):
lowerCamelCase_ = re.sub(r'layers_(\d+)' , r'block/\1/layer' , lowercase )
lowerCamelCase_ = r'(encoder|decoder)\/'
if re.match(lowercase , lowercase ):
lowerCamelCase_ = re.match(lowercase , lowercase ).groups()
if groups[0] == "encoder":
lowerCamelCase_ = re.sub(r'/mlp/' , r'/1/mlp/' , lowercase )
lowerCamelCase_ = re.sub(r'/pre_mlp_layer_norm/' , r'/1/layer_norm/' , lowercase )
elif groups[0] == "decoder":
lowerCamelCase_ = re.sub(r'/mlp/' , r'/2/mlp/' , lowercase )
lowerCamelCase_ = re.sub(r'/pre_mlp_layer_norm/' , r'/2/layer_norm/' , lowercase )
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
lowerCamelCase_ = new_key.replace(lowercase , lowercase )
print(f"""{key} -> {new_key}""" )
lowerCamelCase_ = s_dict.pop(lowercase )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCamelCase_ = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCamelCase_ = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
lowerCamelCase_ = s_dict[key].shape[0]
lowerCamelCase_ = s_dict[key]
for idx in range(lowercase ):
lowerCamelCase_ = expert_weihts[idx]
print(f"""{key} -> {key.replace("expert/" , "nested fstring" )}""" )
s_dict.pop(lowercase )
return s_dict
lowerCamelCase : int = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def _SCREAMING_SNAKE_CASE ( lowercase : Union[str, Any] , lowercase : int ):
'''simple docstring'''
import regex as re
with open(lowercase , 'r' ) as f:
lowerCamelCase_ = f.read()
lowerCamelCase_ = re.findall(r'(.*) = ([0-9.]*)' , lowercase )
lowerCamelCase_ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
lowerCamelCase_ = float(lowercase ) if '.' in value else int(lowercase )
lowerCamelCase_ = re.findall(r'(.*activations) = \(\'(.*)\',\)' , lowercase )[0]
lowerCamelCase_ = str(activation[1] )
lowerCamelCase_ = num_experts
lowerCamelCase_ = SwitchTransformersConfig(**lowercase )
return config
def _SCREAMING_SNAKE_CASE ( lowercase : Dict , lowercase : List[Any] , lowercase : Optional[int]=None , lowercase : Optional[Any]="./" , lowercase : Any=8 ):
'''simple docstring'''
print(f"""Loading flax weights from : {flax_checkpoint_path}""" )
lowerCamelCase_ = checkpoints.load_tax_checkpoint(lowercase )
if gin_file is not None:
lowerCamelCase_ = convert_gin_to_config(lowercase , lowercase )
else:
lowerCamelCase_ = SwitchTransformersConfig.from_pretrained(lowercase )
lowerCamelCase_ = SwitchTransformersForConditionalGeneration(lowercase )
lowerCamelCase_ = flax_params['target']
lowerCamelCase_ = flatten_dict(lowercase , sep='/' )
lowerCamelCase_ = rename_keys(lowercase )
lowerCamelCase_ = unflatten_dict(lowercase , sep='/' )
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(lowercase , lowercase )
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
pt_model.save_pretrained(lowercase )
if __name__ == "__main__":
lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
lowerCamelCase : Tuple = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
args.switch_tax_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
| 70 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'umt5'
snake_case__ :Any = ['past_key_values']
def __init__( self : List[Any] , __magic_name__ : Tuple=250112 , __magic_name__ : str=512 , __magic_name__ : int=64 , __magic_name__ : str=1024 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=6 , __magic_name__ : Dict=32 , __magic_name__ : Optional[Any]=128 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=1E-6 , __magic_name__ : Optional[int]=1.0 , __magic_name__ : Dict="gated-gelu" , __magic_name__ : List[str]=True , __magic_name__ : Tuple=True , __magic_name__ : Optional[int]="T5Tokenizer" , __magic_name__ : str=True , __magic_name__ : int=0 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : str=0 , **__magic_name__ : Any , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=__magic_name__ , tokenizer_class=__magic_name__ , tie_word_embeddings=__magic_name__ , pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = d_model
lowerCAmelCase__ = d_kv
lowerCAmelCase__ = d_ff
lowerCAmelCase__ = num_layers
lowerCAmelCase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = relative_attention_num_buckets
lowerCAmelCase__ = relative_attention_max_distance
lowerCAmelCase__ = dropout_rate
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_factor
lowerCAmelCase__ = feed_forward_proj
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = self.feed_forward_proj.split("-" )
lowerCAmelCase__ = act_info[-1]
lowerCAmelCase__ = act_info[0] == "gated"
if len(__magic_name__ ) > 1 and act_info[0] != "gated" or len(__magic_name__ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
lowerCAmelCase__ = "gelu_new"
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return self.d_model
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return self.num_heads
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.num_layers
class A ( SCREAMING_SNAKE_CASE__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
lowerCAmelCase__ = "past_encoder_sequence + sequence"
lowerCAmelCase__ = {0: "batch"}
lowerCAmelCase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 13
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 5E-4
| 48 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class _snake_case (__SCREAMING_SNAKE_CASE , unittest.TestCase):
__A : List[Any] =CanineTokenizer
__A : Any =False
def UpperCamelCase__ ( self ):
super().setUp()
UpperCAmelCase_ : Dict = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCamelCase__ ( self ):
return CanineTokenizer.from_pretrained("google/canine-s" )
def UpperCamelCase__ ( self ,**_snake_case ):
UpperCAmelCase_ : Optional[int] = self.tokenizer_class.from_pretrained(self.tmpdirname ,**_snake_case )
UpperCAmelCase_ : int = 10_24
return tokenizer
@require_torch
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.canine_tokenizer
UpperCAmelCase_ : str = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
# fmt: off
UpperCAmelCase_ : Any = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
UpperCAmelCase_ : List[Any] = tokenizer(_snake_case ,padding=_snake_case ,return_tensors="pt" )
self.assertIsInstance(_snake_case ,_snake_case )
UpperCAmelCase_ : str = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_snake_case ,_snake_case )
self.assertEqual((2, 39) ,batch.input_ids.shape )
self.assertEqual((2, 39) ,batch.attention_mask.shape )
@require_torch
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.canine_tokenizer
UpperCAmelCase_ : List[Any] = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
UpperCAmelCase_ : Tuple = tokenizer(_snake_case ,padding=_snake_case ,return_tensors="pt" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("input_ids" ,_snake_case )
self.assertIn("attention_mask" ,_snake_case )
self.assertIn("token_type_ids" ,_snake_case )
@require_torch
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.canine_tokenizer
UpperCAmelCase_ : Tuple = [
"What's the weater?",
"It's about 25 degrees.",
]
UpperCAmelCase_ : Union[str, Any] = tokenizer(
text_target=_snake_case ,max_length=32 ,padding="max_length" ,truncation=_snake_case ,return_tensors="pt" )
self.assertEqual(32 ,targets["input_ids"].shape[1] )
def UpperCamelCase__ ( self ):
# safety check on max_len default value so we are sure the test works
UpperCAmelCase_ : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length ,42 )
# Now let's start the test
UpperCAmelCase_ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ : List[str] = tempfile.mkdtemp()
UpperCAmelCase_ : Dict = " He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ : int = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
tokenizer.save_pretrained(_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.__class__.from_pretrained(_snake_case )
UpperCAmelCase_ : Optional[Any] = after_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
shutil.rmtree(_snake_case )
UpperCAmelCase_ : Optional[Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ : Any = tempfile.mkdtemp()
UpperCAmelCase_ : List[str] = " He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ : Union[str, Any] = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
UpperCAmelCase_ : Any = chr(0xE_007 )
additional_special_tokens.append(_snake_case )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ : int = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
tokenizer.save_pretrained(_snake_case )
UpperCAmelCase_ : Optional[Any] = tokenizer.__class__.from_pretrained(_snake_case )
UpperCAmelCase_ : List[Any] = after_tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
self.assertIn(_snake_case ,after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length ,42 )
UpperCAmelCase_ : List[str] = tokenizer.__class__.from_pretrained(_snake_case ,model_max_length=43 )
self.assertEqual(tokenizer.model_max_length ,43 )
shutil.rmtree(_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.get_tokenizers(do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.get_clean_sequence(_snake_case )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ : int = 0xE_005
UpperCAmelCase_ : int = chr(_snake_case )
tokenizer.add_special_tokens({"cls_token": special_token} )
UpperCAmelCase_ : int = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertEqual(len(_snake_case ) ,1 )
UpperCAmelCase_ : Optional[Any] = tokenizer.decode(ids + encoded_special_token ,clean_up_tokenization_spaces=_snake_case )
UpperCAmelCase_ : List[Any] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
UpperCAmelCase_ : str = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
UpperCAmelCase_ : Optional[int] = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
self.assertEqual(_snake_case ,input_encoded + special_token_id )
UpperCAmelCase_ : Optional[Any] = tokenizer.decode(_snake_case ,skip_special_tokens=_snake_case )
self.assertTrue(special_token not in decoded )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = self.get_tokenizers(do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase_ : str = chr(0xE_005 )
UpperCAmelCase_ : List[str] = chr(0xE_006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] ,special_tokens=_snake_case )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]} )
UpperCAmelCase_ : Optional[int] = tokenizer.tokenize(_snake_case )
UpperCAmelCase_ : Any = tokenizer.tokenize(_snake_case )
self.assertEqual(len(_snake_case ) ,1 )
self.assertEqual(len(_snake_case ) ,1 )
self.assertEqual(token_a[0] ,_snake_case )
self.assertEqual(token_a[0] ,_snake_case )
@require_tokenizers
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[int] = self.get_tokenizers(do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
UpperCAmelCase_ : Union[str, Any] = 0xE_006
UpperCAmelCase_ : Any = chr(_snake_case )
UpperCAmelCase_ : Tuple = AddedToken(_snake_case ,lstrip=_snake_case )
tokenizer.add_special_tokens({"additional_special_tokens": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_snake_case )
tokenizer.from_pretrained(_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_snake_case )
with open(os.path.join(_snake_case ,"special_tokens_map.json" ) ,encoding="utf-8" ) as json_file:
UpperCAmelCase_ : Any = json.load(_snake_case )
with open(os.path.join(_snake_case ,"tokenizer_config.json" ) ,encoding="utf-8" ) as json_file:
UpperCAmelCase_ : Dict = json.load(_snake_case )
# a special token for Canine can be defined as follows:
UpperCAmelCase_ : Tuple = 0xE_006
UpperCAmelCase_ : str = chr(_snake_case )
UpperCAmelCase_ : Any = [new_token_a]
UpperCAmelCase_ : Any = [new_token_a]
with open(os.path.join(_snake_case ,"special_tokens_map.json" ) ,"w" ,encoding="utf-8" ) as outfile:
json.dump(_snake_case ,_snake_case )
with open(os.path.join(_snake_case ,"tokenizer_config.json" ) ,"w" ,encoding="utf-8" ) as outfile:
json.dump(_snake_case ,_snake_case )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ : List[Any] = tokenizer_class.from_pretrained(_snake_case ,extra_ids=0 )
self.assertIn(_snake_case ,tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) ,)
UpperCAmelCase_ : List[str] = 0xE_007
UpperCAmelCase_ : Any = chr(_snake_case )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ : List[Any] = [AddedToken(_snake_case ,lstrip=_snake_case )]
UpperCAmelCase_ : List[Any] = tokenizer_class.from_pretrained(
_snake_case ,additional_special_tokens=_snake_case ,extra_ids=0 )
self.assertIn(_snake_case ,tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] ,tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : int = self.get_tokenizers(do_lower_case=_snake_case )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase_ : Tuple = "hello world"
if self.space_between_special_tokens:
UpperCAmelCase_ : str = "[CLS] hello world [SEP]"
else:
UpperCAmelCase_ : List[str] = input
UpperCAmelCase_ : str = tokenizer.encode(_snake_case ,add_special_tokens=_snake_case )
UpperCAmelCase_ : List[Any] = tokenizer.decode(_snake_case ,spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_snake_case ,[output, output.lower()] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase_ : Optional[int] = [
"bos_token",
"eos_token",
"unk_token",
"sep_token",
"pad_token",
"cls_token",
"mask_token",
]
UpperCAmelCase_ : Tuple = "a"
UpperCAmelCase_ : int = ord(_snake_case )
for attr in attributes_list:
setattr(_snake_case ,attr + "_id" ,_snake_case )
self.assertEqual(getattr(_snake_case ,_snake_case ) ,_snake_case )
self.assertEqual(getattr(_snake_case ,attr + "_id" ) ,_snake_case )
setattr(_snake_case ,attr + "_id" ,_snake_case )
self.assertEqual(getattr(_snake_case ,_snake_case ) ,_snake_case )
self.assertEqual(getattr(_snake_case ,attr + "_id" ) ,_snake_case )
setattr(_snake_case ,"additional_special_tokens_ids" ,[] )
self.assertListEqual(getattr(_snake_case ,"additional_special_tokens" ) ,[] )
self.assertListEqual(getattr(_snake_case ,"additional_special_tokens_ids" ) ,[] )
UpperCAmelCase_ : Optional[int] = 0xE_006
UpperCAmelCase_ : List[Any] = chr(_snake_case )
setattr(_snake_case ,"additional_special_tokens_ids" ,[additional_special_token_id] )
self.assertListEqual(getattr(_snake_case ,"additional_special_tokens" ) ,[additional_special_token] )
self.assertListEqual(getattr(_snake_case ,"additional_special_tokens_ids" ) ,[additional_special_token_id] )
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
pass
def UpperCamelCase__ ( self ):
pass
| 71 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class A :
def __init__( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = {}
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = {}
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : str , __magic_name__ : float ):
"""simple docstring"""
if nodea not in self.connections:
self.add_node(__magic_name__ )
if nodea not in self.connections:
self.add_node(__magic_name__ )
lowerCAmelCase__ = probability
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return list(self.connections )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def A ( UpperCamelCase_ : str , UpperCamelCase_ : list[tuple[str, str, float]] , UpperCamelCase_ : int ) -> dict[str, int]:
'''simple docstring'''
lowerCAmelCase__ = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = Counter(graph.get_nodes() )
lowerCAmelCase__ = start
for _ in range(UpperCamelCase_ ):
lowerCAmelCase__ = graph.transition(UpperCamelCase_ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
_UpperCAmelCase : Optional[int] = {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/config.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/config.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/config.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/config.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/config.json''',
'''roberta-large-openai-detector''': '''https://huggingface.co/roberta-large-openai-detector/resolve/main/config.json''',
}
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = 'roberta'
def __init__( self , snake_case_=5_02_65 , snake_case_=7_68 , snake_case_=12 , snake_case_=12 , snake_case_=30_72 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_12 , snake_case_=2 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=1 , snake_case_=0 , snake_case_=2 , snake_case_="absolute" , snake_case_=True , snake_case_=None , **snake_case_ , ):
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
lowercase =vocab_size
lowercase =hidden_size
lowercase =num_hidden_layers
lowercase =num_attention_heads
lowercase =hidden_act
lowercase =intermediate_size
lowercase =hidden_dropout_prob
lowercase =attention_probs_dropout_prob
lowercase =max_position_embeddings
lowercase =type_vocab_size
lowercase =initializer_range
lowercase =layer_norm_eps
lowercase =position_embedding_type
lowercase =use_cache
lowercase =classifier_dropout
class __magic_name__ ( __SCREAMING_SNAKE_CASE ):
@property
def _A( self ):
if self.task == "multiple-choice":
lowercase ={0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
lowercase ={0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 72 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase__ : Optional[Any] = pytest.mark.integration
UpperCAmelCase__ : str = {"comet"}
UpperCAmelCase__ : Optional[Any] = importlib.util.find_spec("fairseq") is not None
UpperCAmelCase__ : Optional[int] = {"code_eval"}
UpperCAmelCase__ : List[Any] = os.name == "nt"
UpperCAmelCase__ : Optional[int] = {"bertscore", "frugalscore", "perplexity"}
UpperCAmelCase__ : int = importlib.util.find_spec("transformers") is not None
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[Any] , UpperCamelCase_ : List[str] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : int ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : Any ) -> int:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : Optional[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@local
class A ( parameterized.TestCase ):
snake_case__ :Union[str, Any] = {}
snake_case__ :Optional[Any] = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__magic_name__ )
# check parameters
lowerCAmelCase__ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__magic_name__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__ ):
yield
else:
yield
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
def load_local_metric(__magic_name__ : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : Any ):
return load_metric(os.path.join("metrics" , __magic_name__ ) , *__magic_name__ , **__magic_name__ )
with patch("datasets.load_metric" ) as mock_load_metric:
lowerCAmelCase__ = load_local_metric
yield
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : Optional[int] ):
"""simple docstring"""
def wrapper(__magic_name__ : Dict ):
lowerCAmelCase__ = contextmanager(__magic_name__ )
lowerCAmelCase__ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def A ( UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class A ( SCREAMING_SNAKE_CASE__ ):
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
lowerCAmelCase__ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def A ( UpperCamelCase_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
import torch
def bert_cos_score_idf(UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
lowerCAmelCase__ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def A ( UpperCamelCase_ : Optional[int] ) -> Any:
'''simple docstring'''
def load_from_checkpoint(UpperCamelCase_ : Tuple ):
class A :
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ):
"""simple docstring"""
assert len(__magic_name__ ) == 2
lowerCAmelCase__ = [0.19, 0.92]
return scores, sum(__magic_name__ ) / len(__magic_name__ )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
lowerCAmelCase__ = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
lowerCAmelCase__ = load_from_checkpoint
yield
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = load_metric(os.path.join("metrics" , "seqeval" ) )
lowerCAmelCase__ = "ERROR"
lowerCAmelCase__ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(UpperCamelCase_ , match=re.escape(UpperCamelCase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase_ )
| 48 | 0 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class _snake_case ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
_lowercase : Tuple = [('''size''', ctypes.c_int), ('''visible''', ctypes.c_byte)]
def lowerCamelCase__ ():
if os.name == "nt":
SCREAMING_SNAKE_CASE = CursorInfo()
SCREAMING_SNAKE_CASE = ctypes.windll.kernelaa.GetStdHandle(-11)
ctypes.windll.kernelaa.GetConsoleCursorInfo(_UpperCAmelCase , ctypes.byref(_UpperCAmelCase))
SCREAMING_SNAKE_CASE = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_UpperCAmelCase , ctypes.byref(_UpperCAmelCase))
elif os.name == "posix":
sys.stdout.write('\033[?25l')
sys.stdout.flush()
def lowerCamelCase__ ():
if os.name == "nt":
SCREAMING_SNAKE_CASE = CursorInfo()
SCREAMING_SNAKE_CASE = ctypes.windll.kernelaa.GetStdHandle(-11)
ctypes.windll.kernelaa.GetConsoleCursorInfo(_UpperCAmelCase , ctypes.byref(_UpperCAmelCase))
SCREAMING_SNAKE_CASE = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_UpperCAmelCase , ctypes.byref(_UpperCAmelCase))
elif os.name == "posix":
sys.stdout.write('\033[?25h')
sys.stdout.flush()
@contextmanager
def lowerCamelCase__ ():
try:
hide_cursor()
yield
finally:
show_cursor()
| 73 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase__ : int = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'facebook/nllb-200-distilled-600M'
snake_case__ :Optional[Any] = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
snake_case__ :List[Any] = 'translator'
snake_case__ :List[Any] = AutoTokenizer
snake_case__ :Optional[Any] = AutoModelForSeqaSeqLM
snake_case__ :List[str] = LANGUAGE_CODES
snake_case__ :List[Any] = ['text', 'text', 'text']
snake_case__ :List[Any] = ['text']
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
lowerCAmelCase__ = self.lang_to_code[src_lang]
lowerCAmelCase__ = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__magic_name__ , return_tensors="pt" , src_lang=__magic_name__ , tgt_lang=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__magic_name__ )
| 48 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowercase_ = False
lowercase_ = True
lowercase_ = False
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
lowercase_ = parser.parse_args()
lowercase_ = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
lowercase_ = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
lowercase_ = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
lowercase_ = reader.read()
lowercase_ = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
lowercase_ = UNetaDModel(**config)
else:
lowercase_ = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
lowercase_ = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowercase_ = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowercase_ = config[key]
del config[key]
lowercase_ = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
lowercase_ = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
lowercase_ = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
lowercase_ = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
lowercase_ = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
lowercase_ = param_value
lowercase_ = True
if not has_changed:
lowercase_ = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 74 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'timm_backbone'
def __init__( self : Tuple , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=3 , __magic_name__ : Dict=True , __magic_name__ : str=True , __magic_name__ : List[Any]=None , **__magic_name__ : Tuple , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = backbone
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = features_only
lowerCAmelCase__ = use_pretrained_backbone
lowerCAmelCase__ = True
lowerCAmelCase__ = out_indices if out_indices is not None else (-1,)
| 48 | 0 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format='''%(message)s''')
def a__ ( lowerCAmelCase__ ) -> np.ndarray:
return input_array.reshape((input_array.size, 1) )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray:
UpperCAmelCase__ : str = np.nan
for i in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Tuple = features[:, labels == i]
UpperCAmelCase__ : List[Any] = data.mean(1 )
# Centralize the data of class i
UpperCAmelCase__ : List[str] = data - column_reshape(lowerCAmelCase__ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(lowerCAmelCase__ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCAmelCase__ : Tuple = np.dot(lowerCAmelCase__ , centered_data.T )
return covariance_sum / features.shape[1]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray:
UpperCAmelCase__ : int = features.mean(1 )
UpperCAmelCase__ : List[Any] = np.nan
for i in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Optional[int] = features[:, labels == i]
UpperCAmelCase__ : List[Any] = data.shape[1]
UpperCAmelCase__ : int = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(lowerCAmelCase__ ) - column_reshape(lowerCAmelCase__ ) , (column_reshape(lowerCAmelCase__ ) - column_reshape(lowerCAmelCase__ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
UpperCAmelCase__ : Union[str, Any] = device_data * np.dot(
column_reshape(lowerCAmelCase__ ) - column_reshape(lowerCAmelCase__ ) , (column_reshape(lowerCAmelCase__ ) - column_reshape(lowerCAmelCase__ )).T , )
return covariance_sum / features.shape[1]
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray:
# Check if the features have been loaded
if features.any():
UpperCAmelCase__ : List[str] = features.mean(1 )
# Center the dataset
UpperCAmelCase__ : Union[str, Any] = features - np.reshape(lowerCAmelCase__ , (data_mean.size, 1) )
UpperCAmelCase__ : List[str] = np.dot(lowerCAmelCase__ , centered_data.T ) / features.shape[1]
UpperCAmelCase__ , UpperCAmelCase__ : Dict = np.linalg.eigh(lowerCAmelCase__ )
# Take all the columns in the reverse order (-1), and then takes only the first
UpperCAmelCase__ : Optional[Any] = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
UpperCAmelCase__ : List[str] = np.dot(filtered_eigenvectors.T , lowerCAmelCase__ )
logging.info('''Principal Component Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=lowerCAmelCase__ )
logging.error('''Dataset empty''' )
raise AssertionError
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> np.ndarray:
assert classes > dimensions
# Check if features have been already loaded
if features.any:
UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = eigh(
covariance_between_classes(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , covariance_within_classes(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) , )
UpperCAmelCase__ : List[Any] = eigenvectors[:, ::-1][:, :dimensions]
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = np.linalg.svd(lowerCAmelCase__ )
UpperCAmelCase__ : Optional[Any] = svd_matrix[:, 0:dimensions]
UpperCAmelCase__ : List[Any] = np.dot(filtered_svd_matrix.T , lowerCAmelCase__ )
logging.info('''Linear Discriminant Analysis computed''' )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format='''%(message)s''' , force=lowerCAmelCase__ )
logging.error('''Dataset empty''' )
raise AssertionError
def a__ ( ) -> None:
# Create dummy dataset with 2 classes and 3 features
UpperCAmelCase__ : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
UpperCAmelCase__ : List[str] = np.array([0, 0, 0, 1, 1] )
UpperCAmelCase__ : Optional[int] = 2
UpperCAmelCase__ : List[str] = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(lowerCAmelCase__ ) as error_info:
UpperCAmelCase__ : Optional[int] = linear_discriminant_analysis(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , np.ndarray ):
raise AssertionError(
'''Did not raise AssertionError for dimensions > classes''' )
assert error_info.type is AssertionError
def a__ ( ) -> None:
UpperCAmelCase__ : Union[str, Any] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : List[Any] = np.array([[6.9_2_8_2_0_3_2_3, 8.6_6_0_2_5_4_0_4, 1_0.3_9_2_3_0_4_8_5], [3.0, 3.0, 3.0]] )
with pytest.raises(lowerCAmelCase__ ) as error_info:
UpperCAmelCase__ : str = principal_component_analysis(lowerCAmelCase__ , lowerCAmelCase__ )
if not np.allclose(lowerCAmelCase__ , lowerCAmelCase__ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'Salesforce/blip-image-captioning-base'
snake_case__ :List[Any] = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
snake_case__ :List[Any] = 'image_captioner'
snake_case__ :Optional[int] = AutoModelForVisionaSeq
snake_case__ :Optional[int] = ['image']
snake_case__ :Any = ['text']
def __init__( self : str , *__magic_name__ : List[str] , **__magic_name__ : Tuple ):
"""simple docstring"""
requires_backends(self , ["vision"] )
super().__init__(*__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : "Image" ):
"""simple docstring"""
return self.pre_processor(images=__magic_name__ , return_tensors="pt" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Optional[int] ):
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0].strip()
| 48 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase ="openai/whisper-base"
UpperCamelCase =(
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
UpperCamelCase ="transcriber"
UpperCamelCase =WhisperProcessor
UpperCamelCase =WhisperForConditionalGeneration
UpperCamelCase =["audio"]
UpperCamelCase =["text"]
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]:
return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' ).input_features
def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]:
return self.model.generate(inputs=UpperCamelCase_ )
def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]:
return self.pre_processor.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )[0]
| 76 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = "▁"
UpperCAmelCase__ : List[str] = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase__ : Union[str, Any] = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
UpperCAmelCase__ : Optional[Any] = {
"facebook/mbart-large-50-one-to-many-mmt": 10_24,
}
# fmt: off
UpperCAmelCase__ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Optional[int] = VOCAB_FILES_NAMES
snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Tuple = ['input_ids', 'attention_mask']
snake_case__ :List[int] = []
snake_case__ :List[int] = []
def __init__( self : int , __magic_name__ : int , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]="</s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : List[Any]="<mask>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : List[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__magic_name__ , tgt_lang=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__magic_name__ ) )
lowerCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase__ = 1
lowerCAmelCase__ = len(self.sp_model )
lowerCAmelCase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__magic_name__ )
}
lowerCAmelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
lowerCAmelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCAmelCase__ = src_lang if src_lang is not None else "en_XX"
lowerCAmelCase__ = self.lang_code_to_id[self._src_lang]
lowerCAmelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
return state
def __setstate__( self : List[Any] , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str ):
"""simple docstring"""
return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase__ = self.sp_model.PieceToId(__magic_name__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : int ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = ""
lowerCAmelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__magic_name__ ) + token
lowerCAmelCase__ = True
lowerCAmelCase__ = []
else:
current_sub_tokens.append(__magic_name__ )
lowerCAmelCase__ = False
out_string += self.sp_model.decode(__magic_name__ )
return out_string.strip()
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , "wb" ) as fi:
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
lowerCAmelCase__ = [1] * len(self.prefix_tokens )
lowerCAmelCase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__magic_name__ )) + suffix_ones
return prefix_ones + ([0] * len(__magic_name__ )) + ([0] * len(__magic_name__ )) + suffix_ones
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ )
lowerCAmelCase__ = tgt_lang_id
return inputs
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = tgt_lang
return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[src_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[tgt_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
| 48 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
A = logging.get_logger(__name__)
A = {"""vocab_file""": """vocab.txt"""}
A = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
A = {
"""YituTech/conv-bert-base""": 512,
"""YituTech/conv-bert-medium-small""": 512,
"""YituTech/conv-bert-small""": 512,
}
A = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class a__ ( __magic_name__ ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_INIT_CONFIGURATION
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ConvBertTokenizer
def __init__( self : Union[str, Any] , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Tuple=None , UpperCamelCase_ : Optional[Any]=True , UpperCamelCase_ : Optional[int]="[UNK]" , UpperCamelCase_ : str="[SEP]" , UpperCamelCase_ : Dict="[PAD]" , UpperCamelCase_ : Tuple="[CLS]" , UpperCamelCase_ : Dict="[MASK]" , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[Any]=None , **UpperCamelCase_ : Any , ):
"""simple docstring"""
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
__UpperCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase" , UpperCamelCase_) != do_lower_case
or normalizer_state.get("strip_accents" , UpperCamelCase_) != strip_accents
or normalizer_state.get("handle_chinese_chars" , UpperCamelCase_) != tokenize_chinese_chars
):
__UpperCAmelCase : Tuple = getattr(UpperCamelCase_ , normalizer_state.pop("type"))
__UpperCAmelCase : int = do_lower_case
__UpperCAmelCase : Any = strip_accents
__UpperCAmelCase : List[str] = tokenize_chinese_chars
__UpperCAmelCase : Any = normalizer_class(**UpperCamelCase_)
__UpperCAmelCase : List[str] = do_lower_case
def a_ ( self : Optional[int] , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Optional[int]=None):
"""simple docstring"""
__UpperCAmelCase : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def a_ ( self : int , UpperCamelCase_ : List[int] , UpperCamelCase_ : Optional[List[int]] = None):
"""simple docstring"""
__UpperCAmelCase : List[str] = [self.sep_token_id]
__UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1]
def a_ ( self : Tuple , UpperCamelCase_ : str , UpperCamelCase_ : Optional[str] = None):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_)
return tuple(UpperCamelCase_)
| 77 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
if start < end:
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ ,lowerCAmelCase__ = _in_place_partition(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += _in_place_quick_sort(UpperCamelCase_ , UpperCamelCase_ , p - 1 )
count += _in_place_quick_sort(UpperCamelCase_ , p + 1 , UpperCamelCase_ )
return count
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ = start - 1
for index in range(UpperCamelCase_ , UpperCamelCase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ = new_pivot_index + 1
lowerCAmelCase__ = a[new_pivot_index]
lowerCAmelCase__ = a[index]
lowerCAmelCase__ = temp
lowerCAmelCase__ = a[new_pivot_index + 1]
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = temp
return new_pivot_index + 1, count
UpperCAmelCase__ : Tuple = TemporaryFile()
UpperCAmelCase__ : List[str] = 1_00 # 1000 elements are to be sorted
UpperCAmelCase__ , UpperCAmelCase__ : Dict = 0, 1 # mean and standard deviation
UpperCAmelCase__ : Tuple = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase__ : Optional[Any] = np.load(outfile)
UpperCAmelCase__ : Any = len(M) - 1
UpperCAmelCase__ : Tuple = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 48 | 0 |
'''simple docstring'''
SCREAMING_SNAKE_CASE_: Optional[int] =[
'Audio',
'Array2D',
'Array3D',
'Array4D',
'Array5D',
'ClassLabel',
'Features',
'Sequence',
'Value',
'Image',
'Translation',
'TranslationVariableLanguages',
]
from .audio import Audio
from .features import ArrayaD, ArrayaD, ArrayaD, ArrayaD, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 78 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A ( UpperCamelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCAmelCase__ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
lowerCAmelCase__ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
lowerCAmelCase__ = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCAmelCase__ = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
lowerCAmelCase__ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
lowerCAmelCase__ = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
lowerCAmelCase__ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
lowerCAmelCase__ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
lowerCAmelCase__ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
lowerCAmelCase__ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
lowerCAmelCase__ = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
lowerCAmelCase__ = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
lowerCAmelCase__ = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ ,lowerCAmelCase__ = int(key_split[2] ), int(key_split[4] )
lowerCAmelCase__ = config.vision_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[
dim : dim * 2, :
]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
else:
lowerCAmelCase__ = rename_key(UpperCamelCase_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCAmelCase__ = val.squeeze_()
else:
lowerCAmelCase__ = val
return orig_state_dict
def A ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple="groupvit-gcc-yfcc" , UpperCamelCase_ : Dict=False ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = GroupViTConfig()
lowerCAmelCase__ = GroupViTModel(UpperCamelCase_ ).eval()
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location="cpu" )["model"]
lowerCAmelCase__ = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ ,lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase_ ) == 0)
# verify result
lowerCAmelCase__ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = processor(text=["a photo of a cat", "a photo of a dog"] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="pt" )
with torch.no_grad():
lowerCAmelCase__ = model(**UpperCamelCase_ )
if model_name == "groupvit-gcc-yfcc":
lowerCAmelCase__ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCAmelCase__ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(F"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , UpperCamelCase_ , atol=1E-3 )
processor.save_pretrained(UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
print("Successfully saved processor and model to" , UpperCamelCase_ )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(UpperCamelCase_ , organization="nielsr" )
model.push_to_hub(UpperCamelCase_ , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 48 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
SCREAMING_SNAKE_CASE__ : str = {
"""Acehnese Arabic""": """ace_Arab""",
"""Acehnese Latin""": """ace_Latn""",
"""Mesopotamian Arabic""": """acm_Arab""",
"""Ta'izzi-Adeni Arabic""": """acq_Arab""",
"""Tunisian Arabic""": """aeb_Arab""",
"""Afrikaans""": """afr_Latn""",
"""South Levantine Arabic""": """ajp_Arab""",
"""Akan""": """aka_Latn""",
"""Amharic""": """amh_Ethi""",
"""North Levantine Arabic""": """apc_Arab""",
"""Modern Standard Arabic""": """arb_Arab""",
"""Modern Standard Arabic Romanized""": """arb_Latn""",
"""Najdi Arabic""": """ars_Arab""",
"""Moroccan Arabic""": """ary_Arab""",
"""Egyptian Arabic""": """arz_Arab""",
"""Assamese""": """asm_Beng""",
"""Asturian""": """ast_Latn""",
"""Awadhi""": """awa_Deva""",
"""Central Aymara""": """ayr_Latn""",
"""South Azerbaijani""": """azb_Arab""",
"""North Azerbaijani""": """azj_Latn""",
"""Bashkir""": """bak_Cyrl""",
"""Bambara""": """bam_Latn""",
"""Balinese""": """ban_Latn""",
"""Belarusian""": """bel_Cyrl""",
"""Bemba""": """bem_Latn""",
"""Bengali""": """ben_Beng""",
"""Bhojpuri""": """bho_Deva""",
"""Banjar Arabic""": """bjn_Arab""",
"""Banjar Latin""": """bjn_Latn""",
"""Standard Tibetan""": """bod_Tibt""",
"""Bosnian""": """bos_Latn""",
"""Buginese""": """bug_Latn""",
"""Bulgarian""": """bul_Cyrl""",
"""Catalan""": """cat_Latn""",
"""Cebuano""": """ceb_Latn""",
"""Czech""": """ces_Latn""",
"""Chokwe""": """cjk_Latn""",
"""Central Kurdish""": """ckb_Arab""",
"""Crimean Tatar""": """crh_Latn""",
"""Welsh""": """cym_Latn""",
"""Danish""": """dan_Latn""",
"""German""": """deu_Latn""",
"""Southwestern Dinka""": """dik_Latn""",
"""Dyula""": """dyu_Latn""",
"""Dzongkha""": """dzo_Tibt""",
"""Greek""": """ell_Grek""",
"""English""": """eng_Latn""",
"""Esperanto""": """epo_Latn""",
"""Estonian""": """est_Latn""",
"""Basque""": """eus_Latn""",
"""Ewe""": """ewe_Latn""",
"""Faroese""": """fao_Latn""",
"""Fijian""": """fij_Latn""",
"""Finnish""": """fin_Latn""",
"""Fon""": """fon_Latn""",
"""French""": """fra_Latn""",
"""Friulian""": """fur_Latn""",
"""Nigerian Fulfulde""": """fuv_Latn""",
"""Scottish Gaelic""": """gla_Latn""",
"""Irish""": """gle_Latn""",
"""Galician""": """glg_Latn""",
"""Guarani""": """grn_Latn""",
"""Gujarati""": """guj_Gujr""",
"""Haitian Creole""": """hat_Latn""",
"""Hausa""": """hau_Latn""",
"""Hebrew""": """heb_Hebr""",
"""Hindi""": """hin_Deva""",
"""Chhattisgarhi""": """hne_Deva""",
"""Croatian""": """hrv_Latn""",
"""Hungarian""": """hun_Latn""",
"""Armenian""": """hye_Armn""",
"""Igbo""": """ibo_Latn""",
"""Ilocano""": """ilo_Latn""",
"""Indonesian""": """ind_Latn""",
"""Icelandic""": """isl_Latn""",
"""Italian""": """ita_Latn""",
"""Javanese""": """jav_Latn""",
"""Japanese""": """jpn_Jpan""",
"""Kabyle""": """kab_Latn""",
"""Jingpho""": """kac_Latn""",
"""Kamba""": """kam_Latn""",
"""Kannada""": """kan_Knda""",
"""Kashmiri Arabic""": """kas_Arab""",
"""Kashmiri Devanagari""": """kas_Deva""",
"""Georgian""": """kat_Geor""",
"""Central Kanuri Arabic""": """knc_Arab""",
"""Central Kanuri Latin""": """knc_Latn""",
"""Kazakh""": """kaz_Cyrl""",
"""Kabiyè""": """kbp_Latn""",
"""Kabuverdianu""": """kea_Latn""",
"""Khmer""": """khm_Khmr""",
"""Kikuyu""": """kik_Latn""",
"""Kinyarwanda""": """kin_Latn""",
"""Kyrgyz""": """kir_Cyrl""",
"""Kimbundu""": """kmb_Latn""",
"""Northern Kurdish""": """kmr_Latn""",
"""Kikongo""": """kon_Latn""",
"""Korean""": """kor_Hang""",
"""Lao""": """lao_Laoo""",
"""Ligurian""": """lij_Latn""",
"""Limburgish""": """lim_Latn""",
"""Lingala""": """lin_Latn""",
"""Lithuanian""": """lit_Latn""",
"""Lombard""": """lmo_Latn""",
"""Latgalian""": """ltg_Latn""",
"""Luxembourgish""": """ltz_Latn""",
"""Luba-Kasai""": """lua_Latn""",
"""Ganda""": """lug_Latn""",
"""Luo""": """luo_Latn""",
"""Mizo""": """lus_Latn""",
"""Standard Latvian""": """lvs_Latn""",
"""Magahi""": """mag_Deva""",
"""Maithili""": """mai_Deva""",
"""Malayalam""": """mal_Mlym""",
"""Marathi""": """mar_Deva""",
"""Minangkabau Arabic """: """min_Arab""",
"""Minangkabau Latin""": """min_Latn""",
"""Macedonian""": """mkd_Cyrl""",
"""Plateau Malagasy""": """plt_Latn""",
"""Maltese""": """mlt_Latn""",
"""Meitei Bengali""": """mni_Beng""",
"""Halh Mongolian""": """khk_Cyrl""",
"""Mossi""": """mos_Latn""",
"""Maori""": """mri_Latn""",
"""Burmese""": """mya_Mymr""",
"""Dutch""": """nld_Latn""",
"""Norwegian Nynorsk""": """nno_Latn""",
"""Norwegian Bokmål""": """nob_Latn""",
"""Nepali""": """npi_Deva""",
"""Northern Sotho""": """nso_Latn""",
"""Nuer""": """nus_Latn""",
"""Nyanja""": """nya_Latn""",
"""Occitan""": """oci_Latn""",
"""West Central Oromo""": """gaz_Latn""",
"""Odia""": """ory_Orya""",
"""Pangasinan""": """pag_Latn""",
"""Eastern Panjabi""": """pan_Guru""",
"""Papiamento""": """pap_Latn""",
"""Western Persian""": """pes_Arab""",
"""Polish""": """pol_Latn""",
"""Portuguese""": """por_Latn""",
"""Dari""": """prs_Arab""",
"""Southern Pashto""": """pbt_Arab""",
"""Ayacucho Quechua""": """quy_Latn""",
"""Romanian""": """ron_Latn""",
"""Rundi""": """run_Latn""",
"""Russian""": """rus_Cyrl""",
"""Sango""": """sag_Latn""",
"""Sanskrit""": """san_Deva""",
"""Santali""": """sat_Olck""",
"""Sicilian""": """scn_Latn""",
"""Shan""": """shn_Mymr""",
"""Sinhala""": """sin_Sinh""",
"""Slovak""": """slk_Latn""",
"""Slovenian""": """slv_Latn""",
"""Samoan""": """smo_Latn""",
"""Shona""": """sna_Latn""",
"""Sindhi""": """snd_Arab""",
"""Somali""": """som_Latn""",
"""Southern Sotho""": """sot_Latn""",
"""Spanish""": """spa_Latn""",
"""Tosk Albanian""": """als_Latn""",
"""Sardinian""": """srd_Latn""",
"""Serbian""": """srp_Cyrl""",
"""Swati""": """ssw_Latn""",
"""Sundanese""": """sun_Latn""",
"""Swedish""": """swe_Latn""",
"""Swahili""": """swh_Latn""",
"""Silesian""": """szl_Latn""",
"""Tamil""": """tam_Taml""",
"""Tatar""": """tat_Cyrl""",
"""Telugu""": """tel_Telu""",
"""Tajik""": """tgk_Cyrl""",
"""Tagalog""": """tgl_Latn""",
"""Thai""": """tha_Thai""",
"""Tigrinya""": """tir_Ethi""",
"""Tamasheq Latin""": """taq_Latn""",
"""Tamasheq Tifinagh""": """taq_Tfng""",
"""Tok Pisin""": """tpi_Latn""",
"""Tswana""": """tsn_Latn""",
"""Tsonga""": """tso_Latn""",
"""Turkmen""": """tuk_Latn""",
"""Tumbuka""": """tum_Latn""",
"""Turkish""": """tur_Latn""",
"""Twi""": """twi_Latn""",
"""Central Atlas Tamazight""": """tzm_Tfng""",
"""Uyghur""": """uig_Arab""",
"""Ukrainian""": """ukr_Cyrl""",
"""Umbundu""": """umb_Latn""",
"""Urdu""": """urd_Arab""",
"""Northern Uzbek""": """uzn_Latn""",
"""Venetian""": """vec_Latn""",
"""Vietnamese""": """vie_Latn""",
"""Waray""": """war_Latn""",
"""Wolof""": """wol_Latn""",
"""Xhosa""": """xho_Latn""",
"""Eastern Yiddish""": """ydd_Hebr""",
"""Yoruba""": """yor_Latn""",
"""Yue Chinese""": """yue_Hant""",
"""Chinese Simplified""": """zho_Hans""",
"""Chinese Traditional""": """zho_Hant""",
"""Standard Malay""": """zsm_Latn""",
"""Zulu""": """zul_Latn""",
}
class UpperCAmelCase_ ( __lowerCamelCase ):
__lowerCamelCase = 'facebook/nllb-200-distilled-600M'
__lowerCamelCase = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
__lowerCamelCase = 'translator'
__lowerCamelCase = AutoTokenizer
__lowerCamelCase = AutoModelForSeqaSeqLM
__lowerCamelCase = LANGUAGE_CODES
__lowerCamelCase = ['text', 'text', 'text']
__lowerCamelCase = ['text']
def __UpperCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
if src_lang not in self.lang_to_code:
raise ValueError(f"{src_lang} is not a supported language." )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"{tgt_lang} is not a supported language." )
UpperCAmelCase__ : Union[str, Any] = self.lang_to_code[src_lang]
UpperCAmelCase__ : Dict = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
_lowerCAmelCase , return_tensors="""pt""" , src_lang=_lowerCAmelCase , tgt_lang=_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase ):
return self.model.generate(**_lowerCAmelCase )
def __UpperCAmelCase ( self , _lowerCAmelCase ):
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=_lowerCAmelCase )
| 79 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
UpperCAmelCase__ : Optional[Any] = 1_00
UpperCAmelCase__ : Any = set(range(3, NUM_PRIMES, 2))
primes.add(2)
UpperCAmelCase__ : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def A ( UpperCamelCase_ : int ) -> set[int]:
'''simple docstring'''
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
lowerCAmelCase__ = set()
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def A ( UpperCamelCase_ : int = 50_00 ) -> int | None:
'''simple docstring'''
for number_to_partition in range(1 , UpperCamelCase_ ):
if len(partition(UpperCamelCase_ ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"{solution() = }")
| 48 | 0 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
__UpperCamelCase : Union[str, Any] = False
class __UpperCamelCase ( unittest.TestCase ):
pass
@nightly
@require_torch_gpu
class __UpperCamelCase ( unittest.TestCase ):
def _a ( self : Any ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
__lowercase = torch.manual_seed(0 )
__lowercase = pipe.dual_guided(
prompt="""first prompt""" , image=_lowerCAmelCase , text_to_image_strength=0.75 , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__lowercase = VersatileDiffusionPipeline.from_pretrained(_lowerCAmelCase , torch_dtype=torch.floataa )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = generator.manual_seed(0 )
__lowercase = pipe.dual_guided(
prompt="""first prompt""" , image=_lowerCAmelCase , text_to_image_strength=0.75 , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _a ( self : Any ) -> Dict:
"""simple docstring"""
__lowercase = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__lowercase = """cyberpunk 2077"""
__lowercase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
__lowercase = torch.manual_seed(0 )
__lowercase = pipe.dual_guided(
prompt=_lowerCAmelCase , image=_lowerCAmelCase , text_to_image_strength=0.75 , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
__lowercase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
__lowercase = """A painting of a squirrel eating a burger """
__lowercase = torch.manual_seed(0 )
__lowercase = pipe.text_to_image(
prompt=_lowerCAmelCase , generator=_lowerCAmelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
__lowercase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
__lowercase = pipe.image_variation(_lowerCAmelCase , generator=_lowerCAmelCase , output_type="""numpy""" ).images
__lowercase = image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
__lowercase = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
| 80 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {"vocab_file": "vocab.json"}
UpperCAmelCase__ : Optional[Any] = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
UpperCAmelCase__ : Union[str, Any] = {"mgp-str": 27}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = VOCAB_FILES_NAMES
snake_case__ :Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int="[GO]" , __magic_name__ : Optional[Any]="[GO]" , __magic_name__ : List[str]="[s]" , __magic_name__ : str="[GO]" , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(
unk_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , pad_token=__magic_name__ , **__magic_name__ , )
with open(__magic_name__ , encoding="utf-8" ) as vocab_handle:
lowerCAmelCase__ = json.load(__magic_name__ )
lowerCAmelCase__ = {v: k for k, v in self.vocab.items()}
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return len(self.vocab )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = []
for s in text:
char_tokens.extend(__magic_name__ )
return char_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str ):
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Tuple ):
"""simple docstring"""
return self.decoder.get(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error("Vocabulary path ({}) should be a directory".format(__magic_name__ ) )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + "\n" )
return (vocab_file,)
| 48 | 0 |
from numpy import exp, pi, sqrt
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase = 0.0 , __lowerCamelCase = 1.0 ):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 81 |
'''simple docstring'''
from math import sqrt
def A ( UpperCamelCase_ : int ) -> int:
'''simple docstring'''
lowerCAmelCase__ = 0
for i in range(1 , int(sqrt(UpperCamelCase_ ) + 1 ) ):
if n % i == 0 and i != sqrt(UpperCamelCase_ ):
total += i + n // i
elif i == sqrt(UpperCamelCase_ ):
total += i
return total - n
def A ( UpperCamelCase_ : int = 1_00_00 ) -> int:
'''simple docstring'''
lowerCAmelCase__ = sum(
i
for i in range(1 , UpperCamelCase_ )
if sum_of_divisors(sum_of_divisors(UpperCamelCase_ ) ) == i and sum_of_divisors(UpperCamelCase_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 48 | 0 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO
)
lowerCamelCase = logging.getLogger(__name__)
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser(
description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)"""
)
parser.add_argument(
"""--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset."""
)
parser.add_argument(
"""--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file."""
)
parser.add_argument("""--vocab_size""", default=30_522, type=int)
lowerCamelCase = parser.parse_args()
logger.info(F"Loading data from {args.data_file}")
with open(args.data_file, """rb""") as fp:
lowerCamelCase = pickle.load(fp)
logger.info("""Counting occurrences for MLM.""")
lowerCamelCase = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowerCamelCase = [0] * args.vocab_size
for k, v in counter.items():
lowerCamelCase = v
logger.info(F"Dump to {args.token_counts_dump}")
with open(args.token_counts_dump, """wb""") as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 82 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( UpperCamelCase_ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.mean(1 )
# Centralize the data of class i
lowerCAmelCase__ = data - column_reshape(UpperCamelCase_ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(UpperCamelCase_ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T )
return covariance_sum / features.shape[1]
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = features.mean(1 )
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.shape[1]
lowerCAmelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
return covariance_sum / features.shape[1]
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
lowerCAmelCase__ = features.mean(1 )
# Center the dataset
lowerCAmelCase__ = features - np.reshape(UpperCamelCase_ , (data_mean.size, 1) )
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) / features.shape[1]
lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.eigh(UpperCamelCase_ )
# Take all the columns in the reverse order (-1), and then takes only the first
lowerCAmelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
lowerCAmelCase__ = np.dot(filtered_eigenvectors.T , UpperCamelCase_ )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
lowerCAmelCase__ ,lowerCAmelCase__ = eigh(
covariance_between_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , covariance_within_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , )
lowerCAmelCase__ = eigenvectors[:, ::-1][:, :dimensions]
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.svd(UpperCamelCase_ )
lowerCAmelCase__ = svd_matrix[:, 0:dimensions]
lowerCAmelCase__ = np.dot(filtered_svd_matrix.T , UpperCamelCase_ )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowerCAmelCase__ = np.array([0, 0, 0, 1, 1] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = linear_discriminant_analysis(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if isinstance(UpperCamelCase_ , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = principal_component_analysis(UpperCamelCase_ , UpperCamelCase_ )
if not np.allclose(UpperCamelCase_ , UpperCamelCase_ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 0 |
"""simple docstring"""
def snake_case_ ( A_ : int ):
'''simple docstring'''
if not isinstance(A_, A_ ):
raise ValueError('''check_bouncy() accepts only integer arguments''' )
_lowerCamelCase : Tuple = str(A_ )
_lowerCamelCase : int = ''''''.join(sorted(A_ ) )
return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
def snake_case_ ( A_ : float = 99 ):
'''simple docstring'''
if not 0 < percent < 1_00:
raise ValueError('''solution() only accepts values from 0 to 100''' )
_lowerCamelCase : List[str] = 0
_lowerCamelCase : List[str] = 1
while True:
if check_bouncy(A_ ):
bouncy_num += 1
if (bouncy_num / num) * 1_00 >= percent:
return num
num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(F"""{solution(99)}""")
| 83 |
'''simple docstring'''
def A ( UpperCamelCase_ : str , UpperCamelCase_ : int ) -> list:
'''simple docstring'''
lowerCAmelCase__ = word.split()
def justify(UpperCamelCase_ : list , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> str:
lowerCAmelCase__ = max_width - width
lowerCAmelCase__ = len(UpperCamelCase_ )
if len(UpperCamelCase_ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowerCAmelCase__ = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCAmelCase__ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCAmelCase__ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(UpperCamelCase_ ):
num_spaces_between_words_list[i] += 1
lowerCAmelCase__ = []
for i in range(UpperCamelCase_ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
for word in words:
if width + len(UpperCamelCase_ ) + len(UpperCamelCase_ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(UpperCamelCase_ )
width += len(UpperCamelCase_ )
else:
# justify the line and add it to result
answer.append(justify(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) )
# reset new line and new width
lowerCAmelCase__ ,lowerCAmelCase__ = [word], len(UpperCamelCase_ )
lowerCAmelCase__ = max_width - width - len(UpperCamelCase_ )
answer.append(" ".join(UpperCamelCase_ ) + (remaining_spaces + 1) * " " )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 48 | 0 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
UpperCAmelCase = (3, 9, -11, 0, 7, 5, 1, -1)
UpperCAmelCase = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class A_ :
'''simple docstring'''
_UpperCamelCase : int
_UpperCamelCase : Node | None
class A_ :
'''simple docstring'''
def __init__( self , snake_case ):
lowercase = None
for i in sorted(snake_case , reverse=snake_case ):
lowercase = Node(snake_case , self.head )
def __iter__( self ):
lowercase = self.head
while node:
yield node.data
lowercase = node.next_node
def __len__( self ):
return sum(1 for _ in self )
def __str__( self ):
return " -> ".join([str(snake_case ) for node in self] )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
return SortedLinkedList(list(__SCREAMING_SNAKE_CASE ) + list(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 84 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase__ : str = sys.version_info >= (3, 10)
def A ( UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None ) -> Optional[int]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=UpperCamelCase_ )
@dataclass
class A :
snake_case__ :int
snake_case__ :float
snake_case__ :str
snake_case__ :bool
@dataclass
class A :
snake_case__ :int = 42
snake_case__ :str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :Optional[bool] = None
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'titi'
snake_case__ :Optional[int] = 'toto'
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'titi'
snake_case__ :str = 'toto'
snake_case__ :int = 42
@dataclass
class A :
snake_case__ :BasicEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.foo )
@dataclass
class A :
snake_case__ :MixedTypeEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = MixedTypeEnum(self.foo )
@dataclass
class A :
snake_case__ :Optional[int] = None
snake_case__ :Optional[float] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :Optional[str] = None
snake_case__ :Optional[List[str]] = list_field(default=[] )
snake_case__ :Optional[List[int]] = list_field(default=[] )
@dataclass
class A :
snake_case__ :List[int] = list_field(default=[] )
snake_case__ :List[int] = list_field(default=[1, 2, 3] )
snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A :
snake_case__ :List[int] = field()
snake_case__ :str = field()
snake_case__ :BasicEnum = field()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.required_enum )
@dataclass
class A :
snake_case__ :int
snake_case__ :"BasicEnum" = field()
snake_case__ :"Optional[bool]" = None
snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} )
snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :bool | None = None
@dataclass
class A :
snake_case__ :int | None = None
snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :str | None = None
snake_case__ :list[str] | None = list_field(default=[] )
snake_case__ :list[int] | None = list_field(default=[] )
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) )
del xx["type"], yy["type"]
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ )
self.assertFalse(example.flag )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
lowerCAmelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
@dataclass
class A :
snake_case__ :Literal["titi", "toto", 42] = "toto"
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(
__magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCAmelCase__ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" )
expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ )
lowerCAmelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) )
lowerCAmelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowerCAmelCase__ = parser.parse_dict(__magic_name__ )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_json" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_yaml" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
| 48 | 0 |
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
SCREAMING_SNAKE_CASE__ : List[str] = HfArgumentParser(InitializationArguments)
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
SCREAMING_SNAKE_CASE__ : int = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
SCREAMING_SNAKE_CASE__ : Optional[int] = {
"vocab_size": len(tokenizer),
"scale_attn_by_inverse_layer_idx": True,
"reorder_and_upcast_attn": True,
}
# Load model config (GPT-2 large in this case)
SCREAMING_SNAKE_CASE__ : Any = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
SCREAMING_SNAKE_CASE__ : Tuple = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
| 85 |
'''simple docstring'''
import sys
from collections import defaultdict
class A :
def __init__( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = []
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[Any] ):
"""simple docstring"""
return self.node_position[vertex]
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = pos
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] ):
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
lowerCAmelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
lowerCAmelCase__ = 2 * start + 1
else:
lowerCAmelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
lowerCAmelCase__ ,lowerCAmelCase__ = heap[smallest_child], positions[smallest_child]
lowerCAmelCase__ ,lowerCAmelCase__ = (
heap[start],
positions[start],
)
lowerCAmelCase__ ,lowerCAmelCase__ = temp, tempa
lowerCAmelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __magic_name__ )
self.top_to_bottom(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = position[index]
while index != 0:
lowerCAmelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
lowerCAmelCase__ = heap[parent]
lowerCAmelCase__ = position[parent]
self.set_position(position[parent] , __magic_name__ )
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , __magic_name__ )
break
lowerCAmelCase__ = parent
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , 0 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
lowerCAmelCase__ = len(__magic_name__ ) // 2 - 1
for i in range(__magic_name__ , -1 , -1 ):
self.top_to_bottom(__magic_name__ , __magic_name__ , len(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = positions[0]
lowerCAmelCase__ = sys.maxsize
self.top_to_bottom(__magic_name__ , 0 , len(__magic_name__ ) , __magic_name__ )
return temp
def A ( UpperCamelCase_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = Heap()
lowerCAmelCase__ = [0] * len(UpperCamelCase_ )
lowerCAmelCase__ = [-1] * len(UpperCamelCase_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
lowerCAmelCase__ = [] # Heap of Distance of vertices from their neighboring vertex
lowerCAmelCase__ = []
for vertex in range(len(UpperCamelCase_ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase_ )
heap.node_position.append(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = 1
lowerCAmelCase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
lowerCAmelCase__ = 0
lowerCAmelCase__ = distance
heap.heapify(UpperCamelCase_ , UpperCamelCase_ )
for _ in range(1 , len(UpperCamelCase_ ) ):
lowerCAmelCase__ = heap.delete_minimum(UpperCamelCase_ , UpperCamelCase_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
lowerCAmelCase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase_ )]
):
lowerCAmelCase__ = distance
heap.bottom_to_top(
UpperCamelCase_ , heap.get_position(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase__ : Optional[int] = int(input("Enter number of edges: ").strip())
UpperCAmelCase__ : str = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase__ : int = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 48 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a :List[str] = logging.get_logger(__name__)
class _a ( snake_case_ ):
"""simple docstring"""
_lowerCamelCase : Tuple = 'timm_backbone'
def __init__( self : Union[str, Any] , UpperCAmelCase : Any=None , UpperCAmelCase : str=3 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Optional[int]=None , **UpperCAmelCase : List[Any] , ):
super().__init__(**UpperCAmelCase )
A_ = backbone
A_ = num_channels
A_ = features_only
A_ = use_pretrained_backbone
A_ = True
A_ = out_indices if out_indices is not None else (-1,)
| 86 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
UpperCAmelCase__ : Tuple = 5
UpperCAmelCase__ : List[Any] = 10
@require_sentencepiece
@require_tokenizers
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Tuple = SpeechaTextTokenizer
snake_case__ :Dict = False
snake_case__ :Optional[int] = True
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
lowerCAmelCase__ = sp.SentencePieceProcessor()
spm_model.Load(__magic_name__ )
lowerCAmelCase__ = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )]
lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCAmelCase__ = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = "<pad>"
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__magic_name__ ) , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [289, 50, 14, 174, 386] , )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class A ( unittest.TestCase ):
snake_case__ :Union[str, Any] = 'valhalla/s2t_mustc_multilinguial_medium'
snake_case__ :Tuple = 'C\'est trop cool'
snake_case__ :List[str] = 'Esto es genial'
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertIn(__magic_name__ , self.tokenizer.all_special_ids )
lowerCAmelCase__ = [ES_CODE, 4, 1601, 47, 7647, 2]
lowerCAmelCase__ = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertNotIn(self.tokenizer.eos_token , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
lowerCAmelCase__ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __magic_name__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowerCAmelCase__ = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 48 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Any:
"""simple docstring"""
with open(lowercase_ ) as metadata_file:
A__ = json.load(lowercase_ )
A__ = LukeConfig(use_entity_aware_attention=lowercase_ , **metadata['''model_config'''] )
# Load in the weights from the checkpoint_path
A__ = torch.load(lowercase_ , map_location='''cpu''' )['''module''']
# Load the entity vocab file
A__ = load_original_entity_vocab(lowercase_ )
# add an entry for [MASK2]
A__ = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
A__ = XLMRobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] )
# Add special tokens to the token vocabulary for downstream tasks
A__ = AddedToken('''<ent>''' , lstrip=lowercase_ , rstrip=lowercase_ )
A__ = AddedToken('''<ent2>''' , lstrip=lowercase_ , rstrip=lowercase_ )
tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"""Saving tokenizer to {pytorch_dump_folder_path}""" )
tokenizer.save_pretrained(lowercase_ )
with open(os.path.join(lowercase_ , '''tokenizer_config.json''' ) , '''r''' ) as f:
A__ = json.load(lowercase_ )
A__ = '''MLukeTokenizer'''
with open(os.path.join(lowercase_ , '''tokenizer_config.json''' ) , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ )
with open(os.path.join(lowercase_ , MLukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f:
json.dump(lowercase_ , lowercase_ )
A__ = MLukeTokenizer.from_pretrained(lowercase_ )
# Initialize the embeddings of the special tokens
A__ = tokenizer.convert_tokens_to_ids(['''@'''] )[0]
A__ = tokenizer.convert_tokens_to_ids(['''#'''] )[0]
A__ = state_dict['''embeddings.word_embeddings.weight''']
A__ = word_emb[ent_init_index].unsqueeze(0 )
A__ = word_emb[enta_init_index].unsqueeze(0 )
A__ = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
A__ = state_dict[bias_name]
A__ = decoder_bias[ent_init_index].unsqueeze(0 )
A__ = decoder_bias[enta_init_index].unsqueeze(0 )
A__ = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
A__ = f"""encoder.layer.{layer_index}.attention.self."""
A__ = state_dict[prefix + matrix_name]
A__ = state_dict[prefix + matrix_name]
A__ = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
A__ = state_dict['''entity_embeddings.entity_embeddings.weight''']
A__ = entity_emb[entity_vocab['''[MASK]''']].unsqueeze(0 )
A__ = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
A__ = state_dict['''entity_predictions.bias''']
A__ = entity_prediction_bias[entity_vocab['''[MASK]''']].unsqueeze(0 )
A__ = torch.cat([entity_prediction_bias, entity_mask_bias] )
A__ = LukeForMaskedLM(config=lowercase_ ).eval()
state_dict.pop('''entity_predictions.decoder.weight''' )
state_dict.pop('''lm_head.decoder.weight''' )
state_dict.pop('''lm_head.decoder.bias''' )
A__ = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('''lm_head''' ) or key.startswith('''entity_predictions''' )):
A__ = state_dict[key]
else:
A__ = state_dict[key]
A__ , A__ = model.load_state_dict(lowercase_ , strict=lowercase_ )
if set(lowercase_ ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"""Unexpected unexpected_keys: {unexpected_keys}""" )
if set(lowercase_ ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"""Unexpected missing_keys: {missing_keys}""" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
A__ = MLukeTokenizer.from_pretrained(lowercase_ , task='''entity_classification''' )
A__ = '''ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'''
A__ = (0, 9)
A__ = tokenizer(lowercase_ , entity_spans=[span] , return_tensors='''pt''' )
A__ = model(**lowercase_ )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A__ = torch.Size((1, 33, 768) )
A__ = torch.tensor([[0.08_92, 0.05_96, -0.28_19], [0.01_34, 0.11_99, 0.05_73], [-0.01_69, 0.09_27, 0.06_44]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}""" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , lowercase_ , atol=1E-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
A__ = torch.Size((1, 1, 768) )
A__ = torch.tensor([[-0.14_82, 0.06_09, 0.03_22]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"""Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"""
f""" {expected_shape}""" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , lowercase_ , atol=1E-4 ):
raise ValueError
# Verify masked word/entity prediction
A__ = MLukeTokenizer.from_pretrained(lowercase_ )
A__ = '''Tokyo is the capital of <mask>.'''
A__ = (24, 30)
A__ = tokenizer(lowercase_ , entity_spans=[span] , return_tensors='''pt''' )
A__ = model(**lowercase_ )
A__ = encoding['''input_ids'''][0].tolist()
A__ = input_ids.index(tokenizer.convert_tokens_to_ids('''<mask>''' ) )
A__ = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(lowercase_ )
A__ = outputs.entity_logits[0][0].argmax().item()
A__ = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('''en:''' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('''Saving PyTorch model to {}'''.format(lowercase_ ) )
model.save_pretrained(lowercase_ )
def SCREAMING_SNAKE_CASE ( lowercase_ ) -> Tuple:
"""simple docstring"""
A__ = ['''[MASK]''', '''[PAD]''', '''[UNK]''']
A__ = [json.loads(lowercase_ ) for line in open(lowercase_ )]
A__ = {}
for entry in data:
A__ = entry['''id''']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
A__ = entity_id
break
A__ = f"""{language}:{entity_name}"""
A__ = entity_id
return new_mapping
if __name__ == "__main__":
_lowerCamelCase : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
_lowerCamelCase : Optional[Any] = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 87 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ : int = "RegNetConfig"
# Base docstring
UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[int] = [1, 10_88, 7, 7]
# Image classification docstring
UpperCAmelCase__ : Tuple = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[Any] = "tabby, tabby cat"
UpperCAmelCase__ : int = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
def __init__( self : str , __magic_name__ : int , __magic_name__ : int = 3 , __magic_name__ : int = 1 , __magic_name__ : int = 1 , __magic_name__ : Optional[str] = "relu" , **__magic_name__ : int , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowerCAmelCase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=__magic_name__ , strides=__magic_name__ , padding="VALID" , groups=__magic_name__ , use_bias=__magic_name__ , name="convolution" , )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
lowerCAmelCase__ = ACTaFN[activation] if activation is not None else tf.identity
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.convolution(self.padding(__magic_name__ ) )
lowerCAmelCase__ = self.normalization(__magic_name__ )
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , __magic_name__ : RegNetConfig , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config.num_channels
lowerCAmelCase__ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = shape_list(__magic_name__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 2, 3, 1) )
lowerCAmelCase__ = self.embedder(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Any , __magic_name__ : int , __magic_name__ : int = 2 , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=1 , strides=__magic_name__ , use_bias=__magic_name__ , name="convolution" )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : bool = False ):
"""simple docstring"""
return self.normalization(self.convolution(__magic_name__ ) , training=__magic_name__ )
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
lowerCAmelCase__ = [
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.pooler(__magic_name__ )
for layer_module in self.attention:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = hidden_state * pooled
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.2" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(__magic_name__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.3" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 , __magic_name__ : int = 2 , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowerCAmelCase__ = [
# downsampling is done in the first layer with stride of 2
layer(__magic_name__ , __magic_name__ , __magic_name__ , stride=__magic_name__ , name="layers.0" ),
*[layer(__magic_name__ , __magic_name__ , __magic_name__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] ):
"""simple docstring"""
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowerCAmelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__magic_name__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ , name=f"""stages.{i+1}""" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : tf.Tensor , __magic_name__ : bool = False , __magic_name__ : bool = True ):
"""simple docstring"""
lowerCAmelCase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
lowerCAmelCase__ = stage_module(__magic_name__ )
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__magic_name__ , hidden_states=__magic_name__ )
@keras_serializable
class A ( tf.keras.layers.Layer ):
snake_case__ :List[Any] = RegNetConfig
def __init__( self : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config
lowerCAmelCase__ = TFRegNetEmbeddings(__magic_name__ , name="embedder" )
lowerCAmelCase__ = TFRegNetEncoder(__magic_name__ , name="encoder" )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
@unpack_inputs
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.embedder(__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = self.encoder(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = encoder_outputs[0]
lowerCAmelCase__ = self.pooler(__magic_name__ )
# Change to NCHW output format have uniformity in the modules
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCAmelCase__ = tuple([tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :str = RegNetConfig
snake_case__ :Optional[Any] = 'regnet'
snake_case__ :Tuple = 'pixel_values'
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
UpperCAmelCase__ : List[str] = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ : Tuple = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Any , __magic_name__ : RegNetConfig , *__magic_name__ : Optional[int] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : int=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
pixel_values=__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , *__magic_name__ : Tuple , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = config.num_labels
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
# classification head
lowerCAmelCase__ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : tf.Tensor = None , __magic_name__ : tf.Tensor = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ = self.classifier[0](__magic_name__ )
lowerCAmelCase__ = self.classifier[1](__magic_name__ )
lowerCAmelCase__ = None if labels is None else self.hf_compute_loss(labels=__magic_name__ , logits=__magic_name__ )
if not return_dict:
lowerCAmelCase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states )
| 48 | 0 |
"""simple docstring"""
import os
from pathlib import Path
def _snake_case ( ):
"""simple docstring"""
from torch.utils.cpp_extension import load
_lowerCamelCase : Optional[Any] = Path(__snake_case ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
_lowerCamelCase : int = [
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , __snake_case , with_cuda=__snake_case , extra_include_paths=[str(__snake_case )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 88 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def A ( UpperCamelCase_ : Tuple ) -> int:
'''simple docstring'''
for param in module.parameters():
lowerCAmelCase__ = False
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowerCAmelCase__ = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = plt.imshow(UpperCamelCase_ )
fig.axes.get_xaxis().set_visible(UpperCamelCase_ )
fig.axes.get_yaxis().set_visible(UpperCamelCase_ )
plt.show()
def A ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = datetime.now()
lowerCAmelCase__ = current_time.strftime("%H:%M:%S" )
return timestamp
| 48 | 0 |
from __future__ import annotations
import numpy as np
def UpperCamelCase_( lowerCamelCase_ ) -> Optional[int]:
return np.maximum(0 , lowerCamelCase_ )
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 89 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase__ : List[Any] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 | 0 |
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''' )
@patch('''builtins.open''' )
def _snake_case ( A , A ) -> List[Any]:
# ===== initialization =====
lowerCAmelCase__ = Mock()
lowerCAmelCase__ = conn, Mock()
lowerCAmelCase__ = iter([1, None] )
lowerCAmelCase__ = lambda A : next(A )
# ===== invoke =====
send_file(filename='''mytext.txt''' , testing=A )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 90 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : int ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = BigBirdConfig.from_json_file(UpperCamelCase_ )
print(F"""Building PyTorch model from configuration: {config}""" )
if is_trivia_qa:
lowerCAmelCase__ = BigBirdForQuestionAnswering(UpperCamelCase_ )
else:
lowerCAmelCase__ = BigBirdForPreTraining(UpperCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(UpperCamelCase_ , UpperCamelCase_ , is_trivia_qa=UpperCamelCase_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
UpperCAmelCase__ : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 48 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Tuple ,A_ : List[Any]=7 ,A_ : Dict=3 ,A_ : Any=18 ,A_ : List[Any]=30 ,A_ : List[Any]=400 ,A_ : List[Any]=True ,A_ : Optional[int]=None ,A_ : int=True ,A_ : Any=None ,A_ : Optional[Any]=True ,A_ : Union[str, Any]=[0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73] ,A_ : str=[0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11] ,A_ : Union[str, Any]=True ,) -> int:
A = size if size is not None else {'height': 224, 'width': 224}
A = crop_size if crop_size is not None else {'height': 18, 'width': 18}
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = do_center_crop
A = crop_size
A = do_normalize
A = image_mean
A = image_std
A = do_convert_rgb
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=False ,A_ : List[str]=False ,A_ : Optional[Any]=False ) -> List[Any]:
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
A = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) )
else:
A = []
for i in range(self.batch_size ):
A , A = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 )
image_inputs.append(np.random.randint(255 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
A = [Image.fromarray(np.moveaxis(A_ ,0 ,-1 ) ) for x in image_inputs]
if torchify:
A = [torch.from_numpy(A_ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = ChineseCLIPImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A = ChineseCLIPImageProcessingTester(self ,do_center_crop=A_ )
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'size' ) )
self.assertTrue(hasattr(A_ ,'do_center_crop' ) )
self.assertTrue(hasattr(A_ ,'center_crop' ) )
self.assertTrue(hasattr(A_ ,'do_normalize' ) )
self.assertTrue(hasattr(A_ ,'image_mean' ) )
self.assertTrue(hasattr(A_ ,'image_std' ) )
self.assertTrue(hasattr(A_ ,'do_convert_rgb' ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 224, 'width': 224} )
self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} )
A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
pass
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = self.image_processor_tester.prepare_inputs(equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = self.image_processor_tester.prepare_inputs(equal_resolution=A_ ,numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = self.image_processor_tester.prepare_inputs(equal_resolution=A_ ,torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
@require_torch
@require_vision
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = ChineseCLIPImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=A_ )
A = 3
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'size' ) )
self.assertTrue(hasattr(A_ ,'do_center_crop' ) )
self.assertTrue(hasattr(A_ ,'center_crop' ) )
self.assertTrue(hasattr(A_ ,'do_normalize' ) )
self.assertTrue(hasattr(A_ ,'image_mean' ) )
self.assertTrue(hasattr(A_ ,'image_std' ) )
self.assertTrue(hasattr(A_ ,'do_convert_rgb' ) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
# Initialize image_processing
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = self.image_processor_tester.prepare_inputs(equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
# Test batched
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) ,)
| 91 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class A :
def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=13 , __magic_name__ : List[str]=7 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : int=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Dict=1 , __magic_name__ : Tuple=32 , __magic_name__ : Any=4 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=512 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = block_sizes
lowerCAmelCase__ = num_decoder_layers
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_head
lowerCAmelCase__ = d_head
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = 2
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase__ = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase__ = self.num_hidden_layers + 2
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForPreTraining(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForMaskedLM(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForSequenceClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFFunnelForMultipleChoice(config=__magic_name__ )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForTokenClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,
) = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ :Any = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ :str = False
snake_case__ :Any = False
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case__ :int = False
snake_case__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self , base=__magic_name__ )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
| 48 | 0 |
'''simple docstring'''
def _lowerCAmelCase ( __magic_name__ : int = 1000 ) -> int:
lowercase : Optional[Any] =2**power
lowercase : Dict =0
while n:
lowercase , lowercase : Optional[Any] =r + n % 10, n // 10
return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 92 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'umt5'
snake_case__ :Any = ['past_key_values']
def __init__( self : List[Any] , __magic_name__ : Tuple=250112 , __magic_name__ : str=512 , __magic_name__ : int=64 , __magic_name__ : str=1024 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=6 , __magic_name__ : Dict=32 , __magic_name__ : Optional[Any]=128 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=1E-6 , __magic_name__ : Optional[int]=1.0 , __magic_name__ : Dict="gated-gelu" , __magic_name__ : List[str]=True , __magic_name__ : Tuple=True , __magic_name__ : Optional[int]="T5Tokenizer" , __magic_name__ : str=True , __magic_name__ : int=0 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : str=0 , **__magic_name__ : Any , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=__magic_name__ , tokenizer_class=__magic_name__ , tie_word_embeddings=__magic_name__ , pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = d_model
lowerCAmelCase__ = d_kv
lowerCAmelCase__ = d_ff
lowerCAmelCase__ = num_layers
lowerCAmelCase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = relative_attention_num_buckets
lowerCAmelCase__ = relative_attention_max_distance
lowerCAmelCase__ = dropout_rate
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_factor
lowerCAmelCase__ = feed_forward_proj
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = self.feed_forward_proj.split("-" )
lowerCAmelCase__ = act_info[-1]
lowerCAmelCase__ = act_info[0] == "gated"
if len(__magic_name__ ) > 1 and act_info[0] != "gated" or len(__magic_name__ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
lowerCAmelCase__ = "gelu_new"
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return self.d_model
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return self.num_heads
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.num_layers
class A ( SCREAMING_SNAKE_CASE__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
lowerCAmelCase__ = "past_encoder_sequence + sequence"
lowerCAmelCase__ = {0: "batch"}
lowerCAmelCase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 13
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 5E-4
| 48 | 0 |
"""simple docstring"""
import random
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
"""simple docstring"""
lowerCAmelCase__ :Optional[Any] = a[left_index]
lowerCAmelCase__ :Tuple = left_index + 1
for j in range(left_index + 1 , _SCREAMING_SNAKE_CASE ):
if a[j] < pivot:
lowerCAmelCase__ , lowerCAmelCase__ :Tuple = a[i], a[j]
i += 1
lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = a[i - 1], a[left_index]
return i - 1
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
"""simple docstring"""
if left < right:
lowerCAmelCase__ :List[str] = random.randint(_SCREAMING_SNAKE_CASE , right - 1 )
lowerCAmelCase__ , lowerCAmelCase__ :Dict = (
a[left],
a[pivot],
) # switches the pivot with the left most bound
lowerCAmelCase__ :Dict = partition(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
quick_sort_random(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the left of the pivot point
quick_sort_random(
_SCREAMING_SNAKE_CASE , pivot_index + 1 , _SCREAMING_SNAKE_CASE ) # recursive quicksort to the right of the pivot point
def __A () ->int:
"""simple docstring"""
lowerCAmelCase__ :Union[str, Any] = input('Enter numbers separated by a comma:\n' ).strip()
lowerCAmelCase__ :Tuple = [int(_SCREAMING_SNAKE_CASE ) for item in user_input.split(',' )]
quick_sort_random(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) )
print(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 93 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class A :
def __init__( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = {}
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = {}
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : str , __magic_name__ : float ):
"""simple docstring"""
if nodea not in self.connections:
self.add_node(__magic_name__ )
if nodea not in self.connections:
self.add_node(__magic_name__ )
lowerCAmelCase__ = probability
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return list(self.connections )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def A ( UpperCamelCase_ : str , UpperCamelCase_ : list[tuple[str, str, float]] , UpperCamelCase_ : int ) -> dict[str, int]:
'''simple docstring'''
lowerCAmelCase__ = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = Counter(graph.get_nodes() )
lowerCAmelCase__ = start
for _ in range(UpperCamelCase_ ):
lowerCAmelCase__ = graph.transition(UpperCamelCase_ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
UpperCamelCase_ = BartTokenizer
def __init__( self : int , UpperCAmelCase : Tuple=None , UpperCAmelCase : int=None , UpperCAmelCase : int=None , UpperCAmelCase : str="replace" , UpperCAmelCase : Any="<s>" , UpperCAmelCase : Any="</s>" , UpperCAmelCase : int="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : Optional[int]="<unk>" , UpperCAmelCase : Optional[int]="<pad>" , UpperCAmelCase : Tuple="<mask>" , UpperCAmelCase : Any=False , UpperCAmelCase : Union[str, Any]=True , **UpperCAmelCase : Optional[Any] , ) -> Tuple:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , trim_offsets=UpperCAmelCase , **UpperCAmelCase , )
lowercase : Any =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , UpperCAmelCase ) != add_prefix_space:
lowercase : int =getattr(UpperCAmelCase , pre_tok_state.pop('''type''' ) )
lowercase : List[Any] =add_prefix_space
lowercase : List[str] =pre_tok_class(**UpperCAmelCase )
lowercase : Optional[Any] =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowercase : Dict ='''post_processor'''
lowercase : Optional[Any] =getattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
if tokenizer_component_instance:
lowercase : str =json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowercase : Optional[int] =tuple(state['''sep'''] )
if "cls" in state:
lowercase : int =tuple(state['''cls'''] )
lowercase : Any =False
if state.get('''add_prefix_space''' , UpperCAmelCase ) != add_prefix_space:
lowercase : Union[str, Any] =add_prefix_space
lowercase : str =True
if state.get('''trim_offsets''' , UpperCAmelCase ) != trim_offsets:
lowercase : List[Any] =trim_offsets
lowercase : Tuple =True
if changes_to_apply:
lowercase : List[str] =getattr(UpperCAmelCase , state.pop('''type''' ) )
lowercase : str =component_class(**UpperCAmelCase )
setattr(self.backend_tokenizer , UpperCAmelCase , UpperCAmelCase )
@property
def A__ ( self : int ) -> str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def A__ ( self : Optional[Any] , UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else value
lowercase : str =value
def A__ ( self : int , *UpperCAmelCase : Dict , **UpperCAmelCase : Dict ) -> BatchEncoding:
'''simple docstring'''
lowercase : Optional[Any] =kwargs.get('''is_split_into_words''' , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'''to use it with pretokenized inputs.''' )
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Optional[int] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Optional[int] ) -> BatchEncoding:
'''simple docstring'''
lowercase : str =kwargs.get('''is_split_into_words''' , UpperCAmelCase )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'''to use it with pretokenized inputs.''' )
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : str , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowercase : Optional[Any] =self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : List[Any]=None ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def A__ ( self : Union[str, Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Union[str, Any] =[self.sep_token_id]
lowercase : str =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 94 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase__ : Optional[Any] = pytest.mark.integration
UpperCAmelCase__ : str = {"comet"}
UpperCAmelCase__ : Optional[Any] = importlib.util.find_spec("fairseq") is not None
UpperCAmelCase__ : Optional[int] = {"code_eval"}
UpperCAmelCase__ : List[Any] = os.name == "nt"
UpperCAmelCase__ : Optional[int] = {"bertscore", "frugalscore", "perplexity"}
UpperCAmelCase__ : int = importlib.util.find_spec("transformers") is not None
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[Any] , UpperCamelCase_ : List[str] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : int ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : Any ) -> int:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : Optional[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@local
class A ( parameterized.TestCase ):
snake_case__ :Union[str, Any] = {}
snake_case__ :Optional[Any] = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__magic_name__ )
# check parameters
lowerCAmelCase__ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__magic_name__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__ ):
yield
else:
yield
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
def load_local_metric(__magic_name__ : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : Any ):
return load_metric(os.path.join("metrics" , __magic_name__ ) , *__magic_name__ , **__magic_name__ )
with patch("datasets.load_metric" ) as mock_load_metric:
lowerCAmelCase__ = load_local_metric
yield
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : Optional[int] ):
"""simple docstring"""
def wrapper(__magic_name__ : Dict ):
lowerCAmelCase__ = contextmanager(__magic_name__ )
lowerCAmelCase__ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def A ( UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class A ( SCREAMING_SNAKE_CASE__ ):
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
lowerCAmelCase__ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def A ( UpperCamelCase_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
import torch
def bert_cos_score_idf(UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
lowerCAmelCase__ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def A ( UpperCamelCase_ : Optional[int] ) -> Any:
'''simple docstring'''
def load_from_checkpoint(UpperCamelCase_ : Tuple ):
class A :
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ):
"""simple docstring"""
assert len(__magic_name__ ) == 2
lowerCAmelCase__ = [0.19, 0.92]
return scores, sum(__magic_name__ ) / len(__magic_name__ )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
lowerCAmelCase__ = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
lowerCAmelCase__ = load_from_checkpoint
yield
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = load_metric(os.path.join("metrics" , "seqeval" ) )
lowerCAmelCase__ = "ERROR"
lowerCAmelCase__ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(UpperCamelCase_ , match=re.escape(UpperCamelCase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase_ )
| 48 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCamelCase_ (__A , unittest.TestCase ):
__magic_name__ = ConsistencyModelPipeline
__magic_name__ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
__magic_name__ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
__magic_name__ = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
UpperCAmelCase_ : Union[str, Any] = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet" , )
return unet
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
UpperCAmelCase_ : Optional[Any] = UNetaDModel.from_pretrained(
"diffusers/consistency-models-test" , subfolder="test_unet_class_cond" , )
return unet
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : Optional[Any]=False ) -> List[Any]:
if class_cond:
UpperCAmelCase_ : Union[str, Any] = self.dummy_cond_unet
else:
UpperCAmelCase_ : Any = self.dummy_uncond_unet
# Default to CM multistep sampler
UpperCAmelCase_ : int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
UpperCAmelCase_ : Any = {
"unet": unet,
"scheduler": scheduler,
}
return components
def _SCREAMING_SNAKE_CASE ( self : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any=0 ) -> List[Any]:
if str(lowerCAmelCase_ ).startswith("mps" ):
UpperCAmelCase_ : Any = torch.manual_seed(lowerCAmelCase_ )
else:
UpperCAmelCase_ : List[str] = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = {
"batch_size": 1,
"num_inference_steps": None,
"timesteps": [22, 0],
"generator": generator,
"output_type": "np",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
UpperCAmelCase_ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : List[str] = ConsistencyModelPipeline(**lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
UpperCAmelCase_ : Any = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : List[Any] = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
UpperCAmelCase_ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Optional[int] = self.get_dummy_components(class_cond=lowerCAmelCase_ )
UpperCAmelCase_ : str = ConsistencyModelPipeline(**lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inputs(lowerCAmelCase_ )
UpperCAmelCase_ : str = 0
UpperCAmelCase_ : int = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Tuple = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
UpperCAmelCase_ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Dict = self.get_dummy_components()
UpperCAmelCase_ : Tuple = ConsistencyModelPipeline(**lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(lowerCAmelCase_ )
UpperCAmelCase_ : Optional[int] = 1
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : Dict = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
UpperCAmelCase_ : int = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ : Tuple = self.get_dummy_components(class_cond=lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = ConsistencyModelPipeline(**lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase_ : int = self.get_dummy_inputs(lowerCAmelCase_ )
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : List[Any] = None
UpperCAmelCase_ : List[Any] = 0
UpperCAmelCase_ : int = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ : List[str] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Optional[int] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class UpperCamelCase_ (unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : Any=0 , lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Optional[Any]="cpu" , lowerCAmelCase_ : Dict=torch.floataa , lowerCAmelCase_ : Optional[Any]=(1, 3, 64, 64) ) -> str:
UpperCAmelCase_ : str = torch.manual_seed(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = {
"num_inference_steps": None,
"timesteps": [22, 0],
"class_labels": 0,
"generator": generator,
"output_type": "np",
}
if get_fixed_latents:
UpperCAmelCase_ : str = self.get_fixed_latents(seed=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ , shape=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = latents
return inputs
def _SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase_ : List[Any]=0 , lowerCAmelCase_ : Any="cpu" , lowerCAmelCase_ : Optional[Any]=torch.floataa , lowerCAmelCase_ : int=(1, 3, 64, 64) ) -> Optional[Any]:
if type(lowerCAmelCase_ ) == str:
UpperCAmelCase_ : Tuple = torch.device(lowerCAmelCase_ )
UpperCAmelCase_ : Tuple = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
UpperCAmelCase_ : List[Any] = randn_tensor(lowerCAmelCase_ , generator=lowerCAmelCase_ , device=lowerCAmelCase_ , dtype=lowerCAmelCase_ )
return latents
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
UpperCAmelCase_ : str = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
UpperCAmelCase_ : int = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase_ : Union[str, Any] = self.get_inputs()
UpperCAmelCase_ : int = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Any = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
UpperCAmelCase_ : List[str] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
UpperCAmelCase_ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
UpperCAmelCase_ : str = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase_ : str = self.get_inputs()
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Union[str, Any] = None
UpperCAmelCase_ : List[str] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Union[str, Any] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
@require_torch_a
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
UpperCAmelCase_ : Optional[int] = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
UpperCAmelCase_ : int = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
UpperCAmelCase_ : Optional[Any] = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase_ : Optional[Any] = self.get_inputs(get_fixed_latents=lowerCAmelCase_ , device=lowerCAmelCase_ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase_ , enable_math=lowerCAmelCase_ , enable_mem_efficient=lowerCAmelCase_ ):
UpperCAmelCase_ : Optional[Any] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase_ : int = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@require_torch_a
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
UpperCAmelCase_ : Tuple = UNetaDModel.from_pretrained("diffusers/consistency_models" , subfolder="diffusers_cd_imagenet64_l2" )
UpperCAmelCase_ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
UpperCAmelCase_ : Optional[Any] = ConsistencyModelPipeline(unet=lowerCAmelCase_ , scheduler=lowerCAmelCase_ )
pipe.to(torch_device=lowerCAmelCase_ , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = self.get_inputs(get_fixed_latents=lowerCAmelCase_ , device=lowerCAmelCase_ )
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Optional[int] = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=lowerCAmelCase_ , enable_math=lowerCAmelCase_ , enable_mem_efficient=lowerCAmelCase_ ):
UpperCAmelCase_ : Optional[int] = pipe(**lowerCAmelCase_ ).images
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase_ : Optional[Any] = image[0, -3:, -3:, -1]
UpperCAmelCase_ : Dict = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 95 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase__ : int = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'facebook/nllb-200-distilled-600M'
snake_case__ :Optional[Any] = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
snake_case__ :List[Any] = 'translator'
snake_case__ :List[Any] = AutoTokenizer
snake_case__ :Optional[Any] = AutoModelForSeqaSeqLM
snake_case__ :List[str] = LANGUAGE_CODES
snake_case__ :List[Any] = ['text', 'text', 'text']
snake_case__ :List[Any] = ['text']
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
lowerCAmelCase__ = self.lang_to_code[src_lang]
lowerCAmelCase__ = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__magic_name__ , return_tensors="pt" , src_lang=__magic_name__ , tgt_lang=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__magic_name__ )
| 48 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __A ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
UpperCAmelCase__ = StableDiffusionXLImgaImgPipeline
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
UpperCAmelCase__ = PipelineTesterMixin.required_optional_params - {"latents"}
UpperCAmelCase__ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
UpperCAmelCase__ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase__ ( self : List[str] ) -> str:
torch.manual_seed(0 )
__magic_name__: Dict = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , attention_head_dim=(2, 4) , use_linear_projection=__snake_case , addition_embed_type="""text_time""" , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
__magic_name__: Union[str, Any] = EulerDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , steps_offset=1 , beta_schedule="""scaled_linear""" , timestep_spacing="""leading""" , )
torch.manual_seed(0 )
__magic_name__: Optional[int] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
__magic_name__: List[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=3_2 , )
__magic_name__: Tuple = CLIPTextModel(__snake_case )
__magic_name__: Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__snake_case )
__magic_name__: Union[str, Any] = CLIPTextModelWithProjection(__snake_case )
__magic_name__: List[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" , local_files_only=__snake_case )
__magic_name__: List[Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
def lowerCamelCase__ ( self : Any , __snake_case : List[Any] , __snake_case : Union[str, Any]=0 ) -> int:
__magic_name__: Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
__magic_name__: str = image / 2 + 0.5
if str(__snake_case ).startswith("""mps""" ):
__magic_name__: Optional[Any] = torch.manual_seed(__snake_case )
else:
__magic_name__: List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__: Union[str, Any] = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 5.0,
"""output_type""": """numpy""",
"""strength""": 0.75,
}
return inputs
def lowerCamelCase__ ( self : int ) -> List[Any]:
__magic_name__: Any = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__: Optional[Any] = self.get_dummy_components()
__magic_name__: Union[str, Any] = StableDiffusionXLImgaImgPipeline(**__snake_case )
__magic_name__: int = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: Union[str, Any] = self.get_dummy_inputs(__snake_case )
__magic_name__: str = sd_pipe(**__snake_case ).images
__magic_name__: int = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
__magic_name__: int = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase__ ( self : str ) -> Optional[Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
pass
def lowerCamelCase__ ( self : Tuple ) -> int:
__magic_name__: str = self.get_dummy_components()
__magic_name__: Any = StableDiffusionXLImgaImgPipeline(**__snake_case )
__magic_name__: int = sd_pipe.to(__snake_case )
__magic_name__: Optional[Any] = sd_pipe.to(__snake_case )
sd_pipe.set_progress_bar_config(disable=__snake_case )
# forward without prompt embeds
__magic_name__: int = self.get_dummy_inputs(__snake_case )
__magic_name__: str = 3 * ["""this is a negative prompt"""]
__magic_name__: str = negative_prompt
__magic_name__: Union[str, Any] = 3 * [inputs["""prompt"""]]
__magic_name__: Any = sd_pipe(**__snake_case )
__magic_name__: List[str] = output.images[0, -3:, -3:, -1]
# forward with prompt embeds
__magic_name__: Any = self.get_dummy_inputs(__snake_case )
__magic_name__: int = 3 * ["""this is a negative prompt"""]
__magic_name__: List[str] = 3 * [inputs.pop("""prompt""" )]
(
(
__magic_name__
), (
__magic_name__
), (
__magic_name__
), (
__magic_name__
),
): List[str] = sd_pipe.encode_prompt(__snake_case , negative_prompt=__snake_case )
__magic_name__: Any = sd_pipe(
**__snake_case , prompt_embeds=__snake_case , negative_prompt_embeds=__snake_case , pooled_prompt_embeds=__snake_case , negative_pooled_prompt_embeds=__snake_case , )
__magic_name__: Optional[Any] = output.images[0, -3:, -3:, -1]
# make sure that it's equal
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@slow
@require_torch_gpu
class __A ( unittest.TestCase ):
def lowerCamelCase__ ( self : List[Any] ) -> Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase__ ( self : int , __snake_case : Dict , __snake_case : Any="cpu" , __snake_case : Union[str, Any]=torch.floataa , __snake_case : int=0 ) -> List[str]:
__magic_name__: Dict = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
__magic_name__: str = np.random.RandomState(__snake_case ).standard_normal((1, 4, 6_4, 6_4) )
__magic_name__: List[Any] = torch.from_numpy(__snake_case ).to(device=__snake_case , dtype=__snake_case )
__magic_name__: int = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def lowerCamelCase__ ( self : Optional[Any] ) -> Optional[int]:
__magic_name__: Optional[int] = DiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-base""" )
pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
__magic_name__: Tuple = self.get_inputs(__snake_case )
__magic_name__: Optional[int] = pipe(**__snake_case ).images
__magic_name__: str = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
__magic_name__: Dict = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506] )
assert np.abs(image_slice - expected_slice ).max() < 7E-3
| 96 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'timm_backbone'
def __init__( self : Tuple , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=3 , __magic_name__ : Dict=True , __magic_name__ : str=True , __magic_name__ : List[Any]=None , **__magic_name__ : Tuple , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = backbone
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = features_only
lowerCAmelCase__ = use_pretrained_backbone
lowerCAmelCase__ = True
lowerCAmelCase__ = out_indices if out_indices is not None else (-1,)
| 48 | 0 |
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a = logging.get_logger(__name__)
__a = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
__a = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
__a = '</w>'
__a = '@@ '
def a ( snake_case__: Dict ):
'''simple docstring'''
lowercase_ = set()
lowercase_ = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowercase_ = char
return pairs
# Speech2Text2 has no max input length
__a = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
a :int = VOCAB_FILES_NAMES
a :Optional[int] = PRETRAINED_VOCAB_FILES_MAP
a :List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a :Tuple = ['input_ids', 'attention_mask']
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any]="<s>" , SCREAMING_SNAKE_CASE_ : Tuple="<pad>" , SCREAMING_SNAKE_CASE_ : int="</s>" , SCREAMING_SNAKE_CASE_ : int="<unk>" , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : Optional[Any]=None , **SCREAMING_SNAKE_CASE_ : str , ) -> Optional[int]:
super().__init__(
unk_token=SCREAMING_SNAKE_CASE_ , bos_token=SCREAMING_SNAKE_CASE_ , eos_token=SCREAMING_SNAKE_CASE_ , pad_token=SCREAMING_SNAKE_CASE_ , do_lower_case=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
lowercase_ = do_lower_case
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as vocab_handle:
lowercase_ = json.load(SCREAMING_SNAKE_CASE_ )
lowercase_ = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
lowercase_ = None
lowercase_ = None
else:
with open(SCREAMING_SNAKE_CASE_ , encoding='''utf-8''' ) as merges_handle:
lowercase_ = merges_handle.read().split('''\n''' )[:-1]
lowercase_ = [tuple(merge.split()[:2] ) for merge in merges]
lowercase_ = dict(zip(SCREAMING_SNAKE_CASE_ , range(len(SCREAMING_SNAKE_CASE_ ) ) ) )
lowercase_ = {}
@property
def _lowercase ( self : Any ) -> int:
return len(self.decoder )
def _lowercase ( self : List[str] ) -> Dict:
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : Any ) -> int:
lowercase_ = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
lowercase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
if not pairs:
return token
while True:
lowercase_ = min(SCREAMING_SNAKE_CASE_ , key=lambda SCREAMING_SNAKE_CASE_ : self.bpe_ranks.get(SCREAMING_SNAKE_CASE_ , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase_ , lowercase_ = bigram
lowercase_ = []
lowercase_ = 0
while i < len(SCREAMING_SNAKE_CASE_ ):
try:
lowercase_ = word.index(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase_ = j
if word[i] == first and i < len(SCREAMING_SNAKE_CASE_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase_ = tuple(SCREAMING_SNAKE_CASE_ )
lowercase_ = new_word
if len(SCREAMING_SNAKE_CASE_ ) == 1:
break
else:
lowercase_ = get_pairs(SCREAMING_SNAKE_CASE_ )
lowercase_ = ''' '''.join(SCREAMING_SNAKE_CASE_ )
if word == "\n " + BPE_TOKEN_MERGES:
lowercase_ = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(SCREAMING_SNAKE_CASE_ ):
lowercase_ = word.replace(SCREAMING_SNAKE_CASE_ , '''''' )
lowercase_ = word.replace(''' ''' , SCREAMING_SNAKE_CASE_ )
lowercase_ = word
return word
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Tuple:
if self.bpe_ranks is None:
raise ValueError(
'''This tokenizer was instantiated without a `merges.txt` file, so'''
''' that it can only be used for decoding, not for encoding.'''
'''Make sure to provide `merges.txt` file at instantiation to enable '''
'''encoding.''' )
if self.do_lower_case:
lowercase_ = text.lower()
lowercase_ = text.split()
lowercase_ = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(SCREAMING_SNAKE_CASE_ ).split(''' ''' ) ) )
return split_tokens
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : str ) -> int:
return self.encoder.get(SCREAMING_SNAKE_CASE_ , self.encoder.get(self.unk_token ) )
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : int ) -> str:
lowercase_ = self.decoder.get(SCREAMING_SNAKE_CASE_ , self.unk_token )
return result
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : List[str] ) -> str:
lowercase_ = ''' '''.join(SCREAMING_SNAKE_CASE_ )
# make sure @@ tokens are concatenated
lowercase_ = ''''''.join(string.split(SCREAMING_SNAKE_CASE_ ) )
return string
def _lowercase ( self : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase_ = os.path.join(
SCREAMING_SNAKE_CASE_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ , ensure_ascii=SCREAMING_SNAKE_CASE_ ) + '''\n''' )
lowercase_ = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(SCREAMING_SNAKE_CASE_ , '''w''' , encoding='''utf-8''' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda SCREAMING_SNAKE_CASE_ : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
lowercase_ = token_index
writer.write(''' '''.join(SCREAMING_SNAKE_CASE_ ) + '''\n''' )
index += 1
return (vocab_file, merges_file)
| 97 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'Salesforce/blip-image-captioning-base'
snake_case__ :List[Any] = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
snake_case__ :List[Any] = 'image_captioner'
snake_case__ :Optional[int] = AutoModelForVisionaSeq
snake_case__ :Optional[int] = ['image']
snake_case__ :Any = ['text']
def __init__( self : str , *__magic_name__ : List[str] , **__magic_name__ : Tuple ):
"""simple docstring"""
requires_backends(self , ["vision"] )
super().__init__(*__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : "Image" ):
"""simple docstring"""
return self.pre_processor(images=__magic_name__ , return_tensors="pt" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Optional[int] ):
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0].strip()
| 48 | 0 |
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
lowercase__ : Optional[int] = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class __lowerCAmelCase ( unittest.TestCase , __magic_name__ ):
"""simple docstring"""
def snake_case__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase = load_tool('''text-question-answering''' )
self.tool.setup()
_UpperCamelCase = load_tool('''text-question-answering''' , remote=lowerCAmelCase__ )
def snake_case__ ( self : List[Any] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.tool(lowerCAmelCase__ , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(lowerCAmelCase__ , '''launched the BigScience Research Workshop''' )
def snake_case__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = self.remote_tool(lowerCAmelCase__ , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(lowerCAmelCase__ , '''launched the BigScience Research Workshop''' )
def snake_case__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.tool(text=lowerCAmelCase__ , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(lowerCAmelCase__ , '''launched the BigScience Research Workshop''' )
def snake_case__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = self.remote_tool(text=lowerCAmelCase__ , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(lowerCAmelCase__ , '''launched the BigScience Research Workshop''' )
| 98 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = "▁"
UpperCAmelCase__ : List[str] = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase__ : Union[str, Any] = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
UpperCAmelCase__ : Optional[Any] = {
"facebook/mbart-large-50-one-to-many-mmt": 10_24,
}
# fmt: off
UpperCAmelCase__ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Optional[int] = VOCAB_FILES_NAMES
snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Tuple = ['input_ids', 'attention_mask']
snake_case__ :List[int] = []
snake_case__ :List[int] = []
def __init__( self : int , __magic_name__ : int , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]="</s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : List[Any]="<mask>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : List[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__magic_name__ , tgt_lang=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__magic_name__ ) )
lowerCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase__ = 1
lowerCAmelCase__ = len(self.sp_model )
lowerCAmelCase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__magic_name__ )
}
lowerCAmelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
lowerCAmelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCAmelCase__ = src_lang if src_lang is not None else "en_XX"
lowerCAmelCase__ = self.lang_code_to_id[self._src_lang]
lowerCAmelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
return state
def __setstate__( self : List[Any] , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str ):
"""simple docstring"""
return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase__ = self.sp_model.PieceToId(__magic_name__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : int ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = ""
lowerCAmelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__magic_name__ ) + token
lowerCAmelCase__ = True
lowerCAmelCase__ = []
else:
current_sub_tokens.append(__magic_name__ )
lowerCAmelCase__ = False
out_string += self.sp_model.decode(__magic_name__ )
return out_string.strip()
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , "wb" ) as fi:
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
lowerCAmelCase__ = [1] * len(self.prefix_tokens )
lowerCAmelCase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__magic_name__ )) + suffix_ones
return prefix_ones + ([0] * len(__magic_name__ )) + ([0] * len(__magic_name__ )) + suffix_ones
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ )
lowerCAmelCase__ = tgt_lang_id
return inputs
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = tgt_lang
return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[src_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[tgt_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
| 48 | 0 |
import collections
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = '▁'
SCREAMING_SNAKE_CASE = {'vocab_file': 'prophetnet.tokenizer'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'microsoft/xprophetnet-large-wiki100-cased': (
'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer'
),
}
}
SCREAMING_SNAKE_CASE = {
'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False},
}
SCREAMING_SNAKE_CASE = {
'microsoft/xprophetnet-large-wiki100-cased': 5_1_2,
}
def a (lowerCAmelCase__ ):
__a = collections.OrderedDict()
with open(lowerCAmelCase__ , """r""" , encoding="""utf-8""" ) as reader:
__a = reader.readlines()
for index, token in enumerate(lowerCAmelCase__ ):
__a = token.rstrip("""\n""" )
__a = index
return vocab
class __UpperCAmelCase ( __A ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ["""input_ids""", """attention_mask"""]
def __init__( self , __A , __A="[SEP]" , __A="[SEP]" , __A="[SEP]" , __A="[UNK]" , __A="[PAD]" , __A="[CLS]" , __A="[MASK]" , __A = None , **__A , ):
__a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=__A , eos_token=__A , sep_token=__A , unk_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
__a = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# put special tokens and [unused] tokens into the vocab
__a = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4}
for i in range(10 ):
__a = f'''[unused{i}]'''
__a = 5 + i
# The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab
__a = 12
__a = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
for k in self.fairseq_tokens_to_ids.keys():
self.unique_no_split_tokens.append(__A )
def __getstate__( self ):
__a = self.__dict__.copy()
__a = None
return state
def __setstate__( self , __A ):
__a = d
try:
import sentencepiece as spm
except ImportError:
logger.warning(
"""You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece"""
""" pip install sentencepiece""" )
raise
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
__a = {}
__a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def snake_case_ ( self , __A , __A = None , __A = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return ([0] * len(__A )) + [1]
return ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
def snake_case_ ( self , __A , __A = None ):
__a = [self.sep_token_id]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0]
return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def snake_case_ ( self ):
return len(self.sp_model ) + self.fairseq_offset
def snake_case_ ( self ):
__a = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def snake_case_ ( self , __A ):
return self.sp_model.encode(__A , out_type=__A )
def snake_case_ ( self , __A ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
__a = self.sp_model.PieceToId(__A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def snake_case_ ( self , __A ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def snake_case_ ( self , __A ):
__a = """""".join(__A ).replace(__A , """ """ ).strip()
return out_string
def snake_case_ ( self , __A , __A = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
__a = os.path.join(
__A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , """wb""" ) as fi:
__a = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def snake_case_ ( self , __A , __A = None ):
if token_ids_a is None:
return token_ids_a + [self.sep_token_id]
__a = [self.sep_token_id]
return token_ids_a + sep + token_ids_a + sep
| 99 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
if start < end:
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ ,lowerCAmelCase__ = _in_place_partition(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += _in_place_quick_sort(UpperCamelCase_ , UpperCamelCase_ , p - 1 )
count += _in_place_quick_sort(UpperCamelCase_ , p + 1 , UpperCamelCase_ )
return count
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ = start - 1
for index in range(UpperCamelCase_ , UpperCamelCase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ = new_pivot_index + 1
lowerCAmelCase__ = a[new_pivot_index]
lowerCAmelCase__ = a[index]
lowerCAmelCase__ = temp
lowerCAmelCase__ = a[new_pivot_index + 1]
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = temp
return new_pivot_index + 1, count
UpperCAmelCase__ : Tuple = TemporaryFile()
UpperCAmelCase__ : List[str] = 1_00 # 1000 elements are to be sorted
UpperCAmelCase__ , UpperCAmelCase__ : Dict = 0, 1 # mean and standard deviation
UpperCAmelCase__ : Tuple = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase__ : Optional[Any] = np.load(outfile)
UpperCAmelCase__ : Any = len(M) - 1
UpperCAmelCase__ : Tuple = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 48 | 0 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def __snake_case ( lowerCAmelCase_ ) -> Union[str, Any]:
return EnvironmentCommand()
def __snake_case ( lowerCAmelCase_ ) -> Tuple:
return EnvironmentCommand(args.accelerate_config_file )
class __snake_case ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
def lowercase_ ( A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = parser.add_parser('''env''' )
download_parser.set_defaults(func=A_ )
download_parser.add_argument(
'''--accelerate-config_file''' , default=A_ , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=A_ )
def __init__( self , A_ , *A_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = accelerate_config_file
def lowercase_ ( self ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = '''not installed'''
if is_safetensors_available():
import safetensors
SCREAMING_SNAKE_CASE__ = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
SCREAMING_SNAKE_CASE__ = f'''{safetensors.__version__} but is ignored because of PyTorch version too old.'''
SCREAMING_SNAKE_CASE__ = '''not installed'''
SCREAMING_SNAKE_CASE__ = SCREAMING_SNAKE_CASE__ = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE__ = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(A_ ):
SCREAMING_SNAKE_CASE__ = load_config_from_file(self._accelerate_config_file ).to_dict()
SCREAMING_SNAKE_CASE__ = (
'''\n'''.join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(A_ , A_ )
else f'''\t{accelerate_config}'''
)
SCREAMING_SNAKE_CASE__ = '''not installed'''
SCREAMING_SNAKE_CASE__ = '''NA'''
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ = torch.__version__
SCREAMING_SNAKE_CASE__ = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ = '''not installed'''
SCREAMING_SNAKE_CASE__ = '''NA'''
if is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ = tf.__version__
try:
# deprecated in v2.1
SCREAMING_SNAKE_CASE__ = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
SCREAMING_SNAKE_CASE__ = bool(tf.config.list_physical_devices('''GPU''' ) )
SCREAMING_SNAKE_CASE__ = '''not installed'''
SCREAMING_SNAKE_CASE__ = '''not installed'''
SCREAMING_SNAKE_CASE__ = '''not installed'''
SCREAMING_SNAKE_CASE__ = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
SCREAMING_SNAKE_CASE__ = flax.__version__
SCREAMING_SNAKE_CASE__ = jax.__version__
SCREAMING_SNAKE_CASE__ = jaxlib.__version__
SCREAMING_SNAKE_CASE__ = jax.lib.xla_bridge.get_backend().platform
SCREAMING_SNAKE_CASE__ = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': f'''{safetensors_version}''',
'''Accelerate version''': f'''{accelerate_version}''',
'''Accelerate config''': f'''{accelerate_config_str}''',
'''PyTorch version (GPU?)''': f'''{pt_version} ({pt_cuda_available})''',
'''Tensorflow version (GPU?)''': f'''{tf_version} ({tf_cuda_available})''',
'''Flax version (CPU?/GPU?/TPU?)''': f'''{flax_version} ({jax_backend})''',
'''Jax version''': f'''{jax_version}''',
'''JaxLib version''': f'''{jaxlib_version}''',
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(A_ ) )
return info
@staticmethod
def lowercase_ ( A_ ):
'''simple docstring'''
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 100 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A ( UpperCamelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCAmelCase__ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
lowerCAmelCase__ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
lowerCAmelCase__ = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCAmelCase__ = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
lowerCAmelCase__ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
lowerCAmelCase__ = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
lowerCAmelCase__ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
lowerCAmelCase__ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
lowerCAmelCase__ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
lowerCAmelCase__ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
lowerCAmelCase__ = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
lowerCAmelCase__ = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
lowerCAmelCase__ = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ ,lowerCAmelCase__ = int(key_split[2] ), int(key_split[4] )
lowerCAmelCase__ = config.vision_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[
dim : dim * 2, :
]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
else:
lowerCAmelCase__ = rename_key(UpperCamelCase_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCAmelCase__ = val.squeeze_()
else:
lowerCAmelCase__ = val
return orig_state_dict
def A ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple="groupvit-gcc-yfcc" , UpperCamelCase_ : Dict=False ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = GroupViTConfig()
lowerCAmelCase__ = GroupViTModel(UpperCamelCase_ ).eval()
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location="cpu" )["model"]
lowerCAmelCase__ = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ ,lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase_ ) == 0)
# verify result
lowerCAmelCase__ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = processor(text=["a photo of a cat", "a photo of a dog"] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="pt" )
with torch.no_grad():
lowerCAmelCase__ = model(**UpperCamelCase_ )
if model_name == "groupvit-gcc-yfcc":
lowerCAmelCase__ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCAmelCase__ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(F"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , UpperCamelCase_ , atol=1E-3 )
processor.save_pretrained(UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
print("Successfully saved processor and model to" , UpperCamelCase_ )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(UpperCamelCase_ , organization="nielsr" )
model.push_to_hub(UpperCamelCase_ , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 48 | 0 |
def a__ ( A__, A__, A__, A__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = len(A__ ), len(grid[0] )
if (
min(A__, A__ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
SCREAMING_SNAKE_CASE_ : Dict = 0
count += depth_first_search(A__, row + 1, A__, A__ )
count += depth_first_search(A__, row - 1, A__, A__ )
count += depth_first_search(A__, A__, col + 1, A__ )
count += depth_first_search(A__, A__, col - 1, A__ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
UpperCAmelCase__ : Optional[Any] = 1_00
UpperCAmelCase__ : Any = set(range(3, NUM_PRIMES, 2))
primes.add(2)
UpperCAmelCase__ : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def A ( UpperCamelCase_ : int ) -> set[int]:
'''simple docstring'''
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
lowerCAmelCase__ = set()
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def A ( UpperCamelCase_ : int = 50_00 ) -> int | None:
'''simple docstring'''
for number_to_partition in range(1 , UpperCamelCase_ ):
if len(partition(UpperCamelCase_ ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"{solution() = }")
| 48 | 0 |
"""simple docstring"""
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : List[Any] = """"""
for i in table:
res += inp[i - 1]
return res
def UpperCamelCase (SCREAMING_SNAKE_CASE ):
return data[1:] + data[0]
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Union[str, Any] = """"""
for i in range(len(SCREAMING_SNAKE_CASE ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Optional[int] = int("""0b""" + data[0] + data[-1] , 2 )
UpperCamelCase : Any = int("""0b""" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
UpperCamelCase : Any = message[:4]
UpperCamelCase : int = message[4:]
UpperCamelCase : int = apply_table(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase : str = xor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
UpperCamelCase : Union[str, Any] = apply_sbox(SCREAMING_SNAKE_CASE , temp[:4] ) # noqa: E741
UpperCamelCase : str = apply_sbox(SCREAMING_SNAKE_CASE , temp[4:] )
UpperCamelCase : Union[str, Any] = """0""" * (2 - len(SCREAMING_SNAKE_CASE )) + l # noqa: E741
UpperCamelCase : Union[str, Any] = """0""" * (2 - len(SCREAMING_SNAKE_CASE )) + r
UpperCamelCase : int = apply_table(l + r , SCREAMING_SNAKE_CASE )
UpperCamelCase : int = xor(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return temp + right
if __name__ == "__main__":
__magic_name__ : Union[str, Any] = input("""Enter 10 bit key: """)
__magic_name__ : Union[str, Any] = input("""Enter 8 bit message: """)
__magic_name__ : Dict = [6, 3, 7, 4, 8, 5, 1_0, 9]
__magic_name__ : str = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
__magic_name__ : Optional[Any] = [2, 4, 3, 1]
__magic_name__ : Tuple = [2, 6, 3, 1, 4, 8, 5, 7]
__magic_name__ : str = [4, 1, 3, 5, 7, 2, 8, 6]
__magic_name__ : str = [4, 1, 2, 3, 2, 3, 4, 1]
__magic_name__ : Any = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__magic_name__ : Any = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__magic_name__ : Dict = apply_table(key, paa_table)
__magic_name__ : Union[str, Any] = temp[:5]
__magic_name__ : Optional[Any] = temp[5:]
__magic_name__ : str = left_shift(left)
__magic_name__ : List[str] = left_shift(right)
__magic_name__ : Tuple = apply_table(left + right, pa_table)
__magic_name__ : Dict = left_shift(left)
__magic_name__ : Union[str, Any] = left_shift(right)
__magic_name__ : List[Any] = left_shift(left)
__magic_name__ : str = left_shift(right)
__magic_name__ : List[str] = apply_table(left + right, pa_table)
# encryption
__magic_name__ : List[str] = apply_table(message, IP)
__magic_name__ : str = function(expansion, sa, sa, keya, temp)
__magic_name__ : str = temp[4:] + temp[:4]
__magic_name__ : Optional[Any] = function(expansion, sa, sa, keya, temp)
__magic_name__ : Any = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
__magic_name__ : Tuple = apply_table(CT, IP)
__magic_name__ : List[str] = function(expansion, sa, sa, keya, temp)
__magic_name__ : Dict = temp[4:] + temp[:4]
__magic_name__ : int = function(expansion, sa, sa, keya, temp)
__magic_name__ : Any = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT)
| 102 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {"vocab_file": "vocab.json"}
UpperCAmelCase__ : Optional[Any] = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
UpperCAmelCase__ : Union[str, Any] = {"mgp-str": 27}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = VOCAB_FILES_NAMES
snake_case__ :Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int="[GO]" , __magic_name__ : Optional[Any]="[GO]" , __magic_name__ : List[str]="[s]" , __magic_name__ : str="[GO]" , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(
unk_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , pad_token=__magic_name__ , **__magic_name__ , )
with open(__magic_name__ , encoding="utf-8" ) as vocab_handle:
lowerCAmelCase__ = json.load(__magic_name__ )
lowerCAmelCase__ = {v: k for k, v in self.vocab.items()}
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return len(self.vocab )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = []
for s in text:
char_tokens.extend(__magic_name__ )
return char_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str ):
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Tuple ):
"""simple docstring"""
return self.decoder.get(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error("Vocabulary path ({}) should be a directory".format(__magic_name__ ) )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + "\n" )
return (vocab_file,)
| 48 | 0 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=5 ) -> List[str]:
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count('''<mask>''' ) == 1
_snake_case = torch.tensor(tokenizer.encode(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ ) ).unsqueeze(0 ) # Batch size 1
_snake_case = model(lowerCAmelCase_ )[0] # The last hidden-state is the first element of the output tuple
_snake_case = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
_snake_case = logits[0, masked_index, :]
_snake_case = logits.softmax(dim=0 )
_snake_case , _snake_case = prob.topk(k=lowerCAmelCase_ , dim=0 )
_snake_case = ''' '''.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(lowerCAmelCase_ ) )] )
_snake_case = tokenizer.mask_token
_snake_case = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(''' ''' ) ):
_snake_case = predicted_token_bpe.replace('''\u2581''' , ''' ''' )
if " {0}".format(lowerCAmelCase_ ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(''' {0}'''.format(lowerCAmelCase_ ) , lowerCAmelCase_ ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(lowerCAmelCase_ , lowerCAmelCase_ ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
snake_case = CamembertTokenizer.from_pretrained('''camembert-base''')
snake_case = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
snake_case = '''Le camembert est <mask> :)'''
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 103 |
'''simple docstring'''
from math import sqrt
def A ( UpperCamelCase_ : int ) -> int:
'''simple docstring'''
lowerCAmelCase__ = 0
for i in range(1 , int(sqrt(UpperCamelCase_ ) + 1 ) ):
if n % i == 0 and i != sqrt(UpperCamelCase_ ):
total += i + n // i
elif i == sqrt(UpperCamelCase_ ):
total += i
return total - n
def A ( UpperCamelCase_ : int = 1_00_00 ) -> int:
'''simple docstring'''
lowerCAmelCase__ = sum(
i
for i in range(1 , UpperCamelCase_ )
if sum_of_divisors(sum_of_divisors(UpperCamelCase_ ) ) == i and sum_of_divisors(UpperCamelCase_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 48 | 0 |
"""simple docstring"""
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def _lowerCamelCase ( UpperCAmelCase_ : bool = True, *UpperCAmelCase_ : List[Any], **UpperCAmelCase_ : List[Any] ) -> Any:
"""simple docstring"""
if not is_tqdm_available():
raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`." )
A__ = False
if main_process_only:
A__ = PartialState().local_process_index == 0
return _tqdm(*UpperCAmelCase_, **UpperCAmelCase_, disable=UpperCAmelCase_ )
| 104 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( UpperCamelCase_ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.mean(1 )
# Centralize the data of class i
lowerCAmelCase__ = data - column_reshape(UpperCamelCase_ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(UpperCamelCase_ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T )
return covariance_sum / features.shape[1]
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = features.mean(1 )
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.shape[1]
lowerCAmelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
return covariance_sum / features.shape[1]
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
lowerCAmelCase__ = features.mean(1 )
# Center the dataset
lowerCAmelCase__ = features - np.reshape(UpperCamelCase_ , (data_mean.size, 1) )
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) / features.shape[1]
lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.eigh(UpperCamelCase_ )
# Take all the columns in the reverse order (-1), and then takes only the first
lowerCAmelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
lowerCAmelCase__ = np.dot(filtered_eigenvectors.T , UpperCamelCase_ )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
lowerCAmelCase__ ,lowerCAmelCase__ = eigh(
covariance_between_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , covariance_within_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , )
lowerCAmelCase__ = eigenvectors[:, ::-1][:, :dimensions]
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.svd(UpperCamelCase_ )
lowerCAmelCase__ = svd_matrix[:, 0:dimensions]
lowerCAmelCase__ = np.dot(filtered_svd_matrix.T , UpperCamelCase_ )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowerCAmelCase__ = np.array([0, 0, 0, 1, 1] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = linear_discriminant_analysis(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if isinstance(UpperCamelCase_ , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = principal_component_analysis(UpperCamelCase_ , UpperCamelCase_ )
if not np.allclose(UpperCamelCase_ , UpperCamelCase_ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 0 |
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def __UpperCAmelCase ( lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = old_name
if "patch_embed" in old_name:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = old_name.split('.' )
if layer == "0":
SCREAMING_SNAKE_CASE_ : Any = old_name.replace('0' , 'convolution1' )
elif layer == "1":
SCREAMING_SNAKE_CASE_ : List[str] = old_name.replace('1' , 'batchnorm_before' )
elif layer == "3":
SCREAMING_SNAKE_CASE_ : Any = old_name.replace('3' , 'convolution2' )
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = old_name.replace('4' , 'batchnorm_after' )
if "network" in old_name and re.search(R'\d\.\d' , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = R'\b\d{2}\b'
if bool(re.search(lowerCamelCase_ , lowerCamelCase_ ) ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = re.search(R'\d\.\d\d.' , lowerCamelCase_ ).group()
else:
SCREAMING_SNAKE_CASE_ : List[Any] = re.search(R'\d\.\d.' , lowerCamelCase_ ).group()
if int(match[0] ) < 6:
SCREAMING_SNAKE_CASE_ : int = old_name.replace(lowerCamelCase_ , '' )
SCREAMING_SNAKE_CASE_ : Dict = trimmed_name.replace('network' , match[0] + '.meta4D_layers.blocks.' + match[2:-1] )
SCREAMING_SNAKE_CASE_ : List[Any] = 'intermediate_stages.' + trimmed_name
else:
SCREAMING_SNAKE_CASE_ : Any = old_name.replace(lowerCamelCase_ , '' )
if int(match[2] ) < num_meta4D_last_stage:
SCREAMING_SNAKE_CASE_ : str = trimmed_name.replace('network' , 'meta4D_layers.blocks.' + match[2] )
else:
SCREAMING_SNAKE_CASE_ : Dict = str(int(match[2] ) - num_meta4D_last_stage )
SCREAMING_SNAKE_CASE_ : Optional[Any] = trimmed_name.replace('network' , 'meta3D_layers.blocks.' + layer_index )
if "norm1" in old_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = trimmed_name.replace('norm1' , 'layernorm1' )
elif "norm2" in old_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = trimmed_name.replace('norm2' , 'layernorm2' )
elif "fc1" in old_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = trimmed_name.replace('fc1' , 'linear_in' )
elif "fc2" in old_name:
SCREAMING_SNAKE_CASE_ : Any = trimmed_name.replace('fc2' , 'linear_out' )
SCREAMING_SNAKE_CASE_ : List[Any] = 'last_stage.' + trimmed_name
elif "network" in old_name and re.search(R'.\d.' , lowerCamelCase_ ):
SCREAMING_SNAKE_CASE_ : str = old_name.replace('network' , 'intermediate_stages' )
if "fc" in new_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = new_name.replace('fc' , 'convolution' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
SCREAMING_SNAKE_CASE_ : List[Any] = new_name.replace('norm1' , 'batchnorm_before' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
SCREAMING_SNAKE_CASE_ : Tuple = new_name.replace('norm2' , 'batchnorm_after' )
if "proj" in new_name:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = new_name.replace('proj' , 'projection' )
if "dist_head" in new_name:
SCREAMING_SNAKE_CASE_ : Tuple = new_name.replace('dist_head' , 'distillation_classifier' )
elif "head" in new_name:
SCREAMING_SNAKE_CASE_ : Tuple = new_name.replace('head' , 'classifier' )
elif "patch_embed" in new_name:
SCREAMING_SNAKE_CASE_ : Any = 'efficientformer.' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
SCREAMING_SNAKE_CASE_ : Optional[int] = new_name.replace('norm' , 'layernorm' )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'efficientformer.' + new_name
else:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'efficientformer.encoder.' + new_name
return new_name
def __UpperCAmelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : str ) -> Dict:
"""simple docstring"""
for key in checkpoint.copy().keys():
SCREAMING_SNAKE_CASE_ : Optional[Any] = checkpoint.pop(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = val
return checkpoint
def __UpperCAmelCase ( ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw )
return image
def __UpperCAmelCase ( lowerCamelCase_ : Path , lowerCamelCase_ : Path , lowerCamelCase_ : Path , lowerCamelCase_ : bool ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = torch.load(lowerCamelCase_ , map_location='cpu' )['model']
SCREAMING_SNAKE_CASE_ : Any = EfficientFormerConfig.from_json_file(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = EfficientFormerForImageClassificationWithTeacher(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Optional[int] = '_'.join(checkpoint_path.split('/' )[-1].split('.' )[0].split('_' )[:-1] )
SCREAMING_SNAKE_CASE_ : Dict = config.depths[-1] - config.num_metaad_blocks + 1
SCREAMING_SNAKE_CASE_ : Tuple = convert_torch_checkpoint(lowerCamelCase_ , lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
# prepare image
SCREAMING_SNAKE_CASE_ : List[str] = prepare_img()
SCREAMING_SNAKE_CASE_ : List[Any] = 2_56
SCREAMING_SNAKE_CASE_ : Any = 2_24
SCREAMING_SNAKE_CASE_ : Any = EfficientFormerImageProcessor(
size={'shortest_edge': image_size} , crop_size={'height': crop_size, 'width': crop_size} , resample=pillow_resamplings['bicubic'] , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = processor(images=lowerCamelCase_ , return_tensors='pt' ).pixel_values
# original processing pipeline
SCREAMING_SNAKE_CASE_ : List[str] = Compose(
[
Resize(lowerCamelCase_ , interpolation=pillow_resamplings['bicubic'] ),
CenterCrop(lowerCamelCase_ ),
ToTensor(),
Normalize(lowerCamelCase_ , lowerCamelCase_ ),
] )
SCREAMING_SNAKE_CASE_ : Any = image_transforms(lowerCamelCase_ ).unsqueeze(0 )
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : Dict = model(lowerCamelCase_ )
SCREAMING_SNAKE_CASE_ : str = outputs.logits
SCREAMING_SNAKE_CASE_ : Any = (1, 10_00)
if "l1" in model_name:
SCREAMING_SNAKE_CASE_ : str = torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :10] , lowerCamelCase_ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :10] , lowerCamelCase_ , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7' )
# Save Checkpoints
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
print(F'Checkpoint successfuly converted. Model saved at {pytorch_dump_path}' )
processor.save_pretrained(lowerCamelCase_ )
print(F'Processor successfuly saved at {pytorch_dump_path}' )
if push_to_hub:
print('Pushing model to the hub...' )
model.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message='Add model' , use_temp_dir=lowerCamelCase_ , )
processor.push_to_hub(
repo_id=F'Bearnardd/{pytorch_dump_path}' , commit_message='Add image processor' , use_temp_dir=lowerCamelCase_ , )
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
UpperCamelCase__ : List[str] = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 105 |
'''simple docstring'''
def A ( UpperCamelCase_ : str , UpperCamelCase_ : int ) -> list:
'''simple docstring'''
lowerCAmelCase__ = word.split()
def justify(UpperCamelCase_ : list , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> str:
lowerCAmelCase__ = max_width - width
lowerCAmelCase__ = len(UpperCamelCase_ )
if len(UpperCamelCase_ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowerCAmelCase__ = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCAmelCase__ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCAmelCase__ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(UpperCamelCase_ ):
num_spaces_between_words_list[i] += 1
lowerCAmelCase__ = []
for i in range(UpperCamelCase_ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
for word in words:
if width + len(UpperCamelCase_ ) + len(UpperCamelCase_ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(UpperCamelCase_ )
width += len(UpperCamelCase_ )
else:
# justify the line and add it to result
answer.append(justify(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) )
# reset new line and new width
lowerCAmelCase__ ,lowerCAmelCase__ = [word], len(UpperCamelCase_ )
lowerCAmelCase__ = max_width - width - len(UpperCamelCase_ )
answer.append(" ".join(UpperCamelCase_ ) + (remaining_spaces + 1) * " " )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 48 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowerCamelCase_ ( lowerCAmelCase__ : List[str] ) -> Tuple:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
A = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
if "img_encoder.patch_embed.proj" in name:
A = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
if "img_encoder.patch_embed.norm" in name:
A = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
if "img_encoder.layers" in name:
A = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
if "blocks" in name and "res" not in name:
A = name.replace('blocks' , 'layers' )
if "attn" in name and "pre_assign" not in name:
A = name.replace('attn' , 'self_attn' )
if "proj" in name and "self_attn" in name and "text" not in name:
A = name.replace('proj' , 'out_proj' )
if "pre_assign_attn.attn.proj" in name:
A = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
if "norm1" in name:
A = name.replace('norm1' , 'layer_norm1' )
if "norm2" in name and "pre_assign" not in name:
A = name.replace('norm2' , 'layer_norm2' )
if "img_encoder.norm" in name:
A = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
# text encoder
if "text_encoder.token_embedding" in name:
A = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
if "text_encoder.positional_embedding" in name:
A = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
if "text_encoder.transformer.resblocks." in name:
A = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
if "ln_1" in name:
A = name.replace('ln_1' , 'layer_norm1' )
if "ln_2" in name:
A = name.replace('ln_2' , 'layer_norm2' )
if "c_fc" in name:
A = name.replace('c_fc' , 'fc1' )
if "c_proj" in name:
A = name.replace('c_proj' , 'fc2' )
if "text_encoder" in name:
A = name.replace('text_encoder' , 'text_model' )
if "ln_final" in name:
A = name.replace('ln_final' , 'final_layer_norm' )
# projection layers
if "img_projector.linear_hidden." in name:
A = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
if "img_projector.linear_out." in name:
A = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
if "text_projector.linear_hidden" in name:
A = name.replace('text_projector.linear_hidden' , 'text_projection' )
if "text_projector.linear_out" in name:
A = name.replace('text_projector.linear_out' , 'text_projection.3' )
return name
def lowerCamelCase_ ( lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] ) -> int:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(lowerCAmelCase__ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A = key.split('.' )
A , A = int(key_split[2] ), int(key_split[4] )
A = config.vision_config.hidden_size
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[dim : dim * 2]
A = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A = key.split('.' )
A = int(key_split[3] )
A = config.text_config.hidden_size
if "weight" in key:
A = val[:dim, :]
A = val[
dim : dim * 2, :
]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[dim : dim * 2]
A = val[-dim:]
else:
A = rename_key(lowerCAmelCase__ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
A = val.squeeze_()
else:
A = val
return orig_state_dict
def lowerCamelCase_ ( ) -> List[Any]:
'''simple docstring'''
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw )
return im
@torch.no_grad()
def lowerCamelCase_ ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int]="groupvit-gcc-yfcc" , lowerCAmelCase__ : str=False ) -> Any:
'''simple docstring'''
A = GroupViTConfig()
A = GroupViTModel(lowerCAmelCase__ ).eval()
A = torch.load(lowerCAmelCase__ , map_location='cpu' )['model']
A = convert_state_dict(lowerCAmelCase__ , lowerCAmelCase__ )
A , A = model.load_state_dict(lowerCAmelCase__ , strict=lowerCAmelCase__ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(lowerCAmelCase__ ) == 0)
# verify result
A = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
A = prepare_img()
A = processor(text=['a photo of a cat', 'a photo of a dog'] , images=lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='pt' )
with torch.no_grad():
A = model(**lowerCAmelCase__ )
if model_name == "groupvit-gcc-yfcc":
A = torch.tensor([[13.3523, 6.3629]] )
elif model_name == "groupvit-gcc-redcaps":
A = torch.tensor([[16.1873, 8.6230]] )
else:
raise ValueError(F'''Model name {model_name} not supported.''' )
assert torch.allclose(outputs.logits_per_image , lowerCAmelCase__ , atol=1E-3 )
processor.save_pretrained(lowerCAmelCase__ )
model.save_pretrained(lowerCAmelCase__ )
print('Successfully saved processor and model to' , lowerCAmelCase__ )
if push_to_hub:
print('Pushing to the hub...' )
processor.push_to_hub(lowerCAmelCase__ , organization='nielsr' )
model.push_to_hub(lowerCAmelCase__ , organization='nielsr' )
if __name__ == "__main__":
__snake_case :List[Any] =argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
default='groupvit-gccy-fcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
__snake_case :Any =parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 106 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase__ : str = sys.version_info >= (3, 10)
def A ( UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None ) -> Optional[int]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=UpperCamelCase_ )
@dataclass
class A :
snake_case__ :int
snake_case__ :float
snake_case__ :str
snake_case__ :bool
@dataclass
class A :
snake_case__ :int = 42
snake_case__ :str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :Optional[bool] = None
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'titi'
snake_case__ :Optional[int] = 'toto'
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'titi'
snake_case__ :str = 'toto'
snake_case__ :int = 42
@dataclass
class A :
snake_case__ :BasicEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.foo )
@dataclass
class A :
snake_case__ :MixedTypeEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = MixedTypeEnum(self.foo )
@dataclass
class A :
snake_case__ :Optional[int] = None
snake_case__ :Optional[float] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :Optional[str] = None
snake_case__ :Optional[List[str]] = list_field(default=[] )
snake_case__ :Optional[List[int]] = list_field(default=[] )
@dataclass
class A :
snake_case__ :List[int] = list_field(default=[] )
snake_case__ :List[int] = list_field(default=[1, 2, 3] )
snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A :
snake_case__ :List[int] = field()
snake_case__ :str = field()
snake_case__ :BasicEnum = field()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.required_enum )
@dataclass
class A :
snake_case__ :int
snake_case__ :"BasicEnum" = field()
snake_case__ :"Optional[bool]" = None
snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} )
snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :bool | None = None
@dataclass
class A :
snake_case__ :int | None = None
snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :str | None = None
snake_case__ :list[str] | None = list_field(default=[] )
snake_case__ :list[int] | None = list_field(default=[] )
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) )
del xx["type"], yy["type"]
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ )
self.assertFalse(example.flag )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
lowerCAmelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
@dataclass
class A :
snake_case__ :Literal["titi", "toto", 42] = "toto"
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(
__magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCAmelCase__ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" )
expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ )
lowerCAmelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) )
lowerCAmelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowerCAmelCase__ = parser.parse_dict(__magic_name__ )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_json" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_yaml" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
| 48 | 0 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : str ) -> Dict:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
_A , _A = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-canny', from_pt=UpperCamelCase__, dtype=jnp.bfloataa )
_A , _A = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', controlnet=UpperCamelCase__, from_pt=UpperCamelCase__, dtype=jnp.bfloataa )
_A = controlnet_params
_A = 'bird'
_A = jax.device_count()
_A = pipe.prepare_text_inputs([prompts] * num_samples )
_A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' )
_A = pipe.prepare_image_inputs([canny_image] * num_samples )
_A = jax.random.PRNGKey(0 )
_A = jax.random.split(UpperCamelCase__, jax.device_count() )
_A = replicate(UpperCamelCase__ )
_A = shard(UpperCamelCase__ )
_A = shard(UpperCamelCase__ )
_A = pipe(
prompt_ids=UpperCamelCase__, image=UpperCamelCase__, params=UpperCamelCase__, prng_seed=UpperCamelCase__, num_inference_steps=50, jit=UpperCamelCase__, ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
_A = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_A = images[0, 2_53:2_56, 2_53:2_56, -1]
_A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_A = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
_A , _A = FlaxControlNetModel.from_pretrained(
'lllyasviel/sd-controlnet-openpose', from_pt=UpperCamelCase__, dtype=jnp.bfloataa )
_A , _A = FlaxStableDiffusionControlNetPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5', controlnet=UpperCamelCase__, from_pt=UpperCamelCase__, dtype=jnp.bfloataa )
_A = controlnet_params
_A = 'Chef in the kitchen'
_A = jax.device_count()
_A = pipe.prepare_text_inputs([prompts] * num_samples )
_A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' )
_A = pipe.prepare_image_inputs([pose_image] * num_samples )
_A = jax.random.PRNGKey(0 )
_A = jax.random.split(UpperCamelCase__, jax.device_count() )
_A = replicate(UpperCamelCase__ )
_A = shard(UpperCamelCase__ )
_A = shard(UpperCamelCase__ )
_A = pipe(
prompt_ids=UpperCamelCase__, image=UpperCamelCase__, params=UpperCamelCase__, prng_seed=UpperCamelCase__, num_inference_steps=50, jit=UpperCamelCase__, ).images
assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3)
_A = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_A = images[0, 2_53:2_56, 2_53:2_56, -1]
_A = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_A = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 107 |
'''simple docstring'''
import sys
from collections import defaultdict
class A :
def __init__( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = []
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[Any] ):
"""simple docstring"""
return self.node_position[vertex]
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = pos
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] ):
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
lowerCAmelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
lowerCAmelCase__ = 2 * start + 1
else:
lowerCAmelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
lowerCAmelCase__ ,lowerCAmelCase__ = heap[smallest_child], positions[smallest_child]
lowerCAmelCase__ ,lowerCAmelCase__ = (
heap[start],
positions[start],
)
lowerCAmelCase__ ,lowerCAmelCase__ = temp, tempa
lowerCAmelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __magic_name__ )
self.top_to_bottom(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = position[index]
while index != 0:
lowerCAmelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
lowerCAmelCase__ = heap[parent]
lowerCAmelCase__ = position[parent]
self.set_position(position[parent] , __magic_name__ )
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , __magic_name__ )
break
lowerCAmelCase__ = parent
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , 0 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
lowerCAmelCase__ = len(__magic_name__ ) // 2 - 1
for i in range(__magic_name__ , -1 , -1 ):
self.top_to_bottom(__magic_name__ , __magic_name__ , len(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = positions[0]
lowerCAmelCase__ = sys.maxsize
self.top_to_bottom(__magic_name__ , 0 , len(__magic_name__ ) , __magic_name__ )
return temp
def A ( UpperCamelCase_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = Heap()
lowerCAmelCase__ = [0] * len(UpperCamelCase_ )
lowerCAmelCase__ = [-1] * len(UpperCamelCase_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
lowerCAmelCase__ = [] # Heap of Distance of vertices from their neighboring vertex
lowerCAmelCase__ = []
for vertex in range(len(UpperCamelCase_ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase_ )
heap.node_position.append(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = 1
lowerCAmelCase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
lowerCAmelCase__ = 0
lowerCAmelCase__ = distance
heap.heapify(UpperCamelCase_ , UpperCamelCase_ )
for _ in range(1 , len(UpperCamelCase_ ) ):
lowerCAmelCase__ = heap.delete_minimum(UpperCamelCase_ , UpperCamelCase_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
lowerCAmelCase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase_ )]
):
lowerCAmelCase__ = distance
heap.bottom_to_top(
UpperCamelCase_ , heap.get_position(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase__ : Optional[int] = int(input("Enter number of edges: ").strip())
UpperCAmelCase__ : str = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase__ : int = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 48 | 0 |
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = ['''image_processor''', '''tokenizer''']
_lowerCamelCase = '''OwlViTImageProcessor'''
_lowerCamelCase = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : Union[str, Any] , lowerCamelCase : List[Any]=None , lowerCamelCase : Optional[int]=None , **lowerCamelCase : int ) -> Any:
"""simple docstring"""
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , lowerCamelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(lowerCamelCase , lowerCamelCase )
def __call__( self : Optional[Any] , lowerCamelCase : Optional[Any]=None , lowerCamelCase : Optional[int]=None , lowerCamelCase : str=None , lowerCamelCase : List[Any]="max_length" , lowerCamelCase : List[Any]="np" , **lowerCamelCase : Union[str, Any] ) -> Any:
"""simple docstring"""
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(lowerCamelCase , lowerCamelCase ) or (isinstance(lowerCamelCase , lowerCamelCase ) and not isinstance(text[0] , lowerCamelCase )):
_UpperCAmelCase = [self.tokenizer(lowerCamelCase , padding=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )]
elif isinstance(lowerCamelCase , lowerCamelCase ) and isinstance(text[0] , lowerCamelCase ):
_UpperCAmelCase = []
# Maximum number of queries across batch
_UpperCAmelCase = max([len(lowerCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(lowerCamelCase ) != max_num_queries:
_UpperCAmelCase = t + [""" """] * (max_num_queries - len(lowerCamelCase ))
_UpperCAmelCase = self.tokenizer(lowerCamelCase , padding=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
encodings.append(lowerCamelCase )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
_UpperCAmelCase = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
_UpperCAmelCase = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_UpperCAmelCase = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
_UpperCAmelCase = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_UpperCAmelCase = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
_UpperCAmelCase = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_UpperCAmelCase = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
_UpperCAmelCase = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
_UpperCAmelCase = BatchEncoding()
_UpperCAmelCase = input_ids
_UpperCAmelCase = attention_mask
if query_images is not None:
_UpperCAmelCase = BatchEncoding()
_UpperCAmelCase = self.image_processor(
lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase ).pixel_values
_UpperCAmelCase = query_pixel_values
if images is not None:
_UpperCAmelCase = self.image_processor(lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase )
if text is not None and images is not None:
_UpperCAmelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCamelCase ) , tensor_type=lowerCamelCase )
def lowerCamelCase ( self : List[Any] , *lowerCamelCase : Dict , **lowerCamelCase : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor.post_process(*lowerCamelCase , **lowerCamelCase )
def lowerCamelCase ( self : List[str] , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
return self.image_processor.post_process_object_detection(*lowerCamelCase , **lowerCamelCase )
def lowerCamelCase ( self : List[Any] , *lowerCamelCase : int , **lowerCamelCase : Tuple ) -> List[str]:
"""simple docstring"""
return self.image_processor.post_process_image_guided_detection(*lowerCamelCase , **lowerCamelCase )
def lowerCamelCase ( self : Dict , *lowerCamelCase : Union[str, Any] , **lowerCamelCase : Dict ) -> Union[str, Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def lowerCamelCase ( self : int , *lowerCamelCase : List[Any] , **lowerCamelCase : Tuple ) -> int:
"""simple docstring"""
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
@property
def lowerCamelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , lowerCamelCase , )
return self.image_processor_class
@property
def lowerCamelCase ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , lowerCamelCase , )
return self.image_processor
| 108 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
UpperCAmelCase__ : Tuple = 5
UpperCAmelCase__ : List[Any] = 10
@require_sentencepiece
@require_tokenizers
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Tuple = SpeechaTextTokenizer
snake_case__ :Dict = False
snake_case__ :Optional[int] = True
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
lowerCAmelCase__ = sp.SentencePieceProcessor()
spm_model.Load(__magic_name__ )
lowerCAmelCase__ = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )]
lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCAmelCase__ = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = "<pad>"
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__magic_name__ ) , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [289, 50, 14, 174, 386] , )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class A ( unittest.TestCase ):
snake_case__ :Union[str, Any] = 'valhalla/s2t_mustc_multilinguial_medium'
snake_case__ :Tuple = 'C\'est trop cool'
snake_case__ :List[str] = 'Esto es genial'
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertIn(__magic_name__ , self.tokenizer.all_special_ids )
lowerCAmelCase__ = [ES_CODE, 4, 1601, 47, 7647, 2]
lowerCAmelCase__ = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertNotIn(self.tokenizer.eos_token , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
lowerCAmelCase__ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __magic_name__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowerCAmelCase__ = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 48 | 0 |
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
a = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
a = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def __magic_name__ ( __UpperCAmelCase ) -> list[list[int]]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
for i in range(len(__UpperCAmelCase ) ):
__SCREAMING_SNAKE_CASE = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
__SCREAMING_SNAKE_CASE = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(__UpperCAmelCase ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(__UpperCAmelCase ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(__UpperCAmelCase ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
__SCREAMING_SNAKE_CASE = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(__UpperCAmelCase )
return next_generation
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> list[Image.Image]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
for _ in range(__UpperCAmelCase ):
# Create output image
__SCREAMING_SNAKE_CASE = Image.new("""RGB""" , (len(cells[0] ), len(__UpperCAmelCase )) )
__SCREAMING_SNAKE_CASE = img.load()
# Save cells to image
for x in range(len(__UpperCAmelCase ) ):
for y in range(len(cells[0] ) ):
__SCREAMING_SNAKE_CASE = 255 - cells[y][x] * 255
__SCREAMING_SNAKE_CASE = (colour, colour, colour)
# Save image
images.append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = new_generation(__UpperCAmelCase )
return images
if __name__ == "__main__":
a = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
| 109 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ : int = "RegNetConfig"
# Base docstring
UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[int] = [1, 10_88, 7, 7]
# Image classification docstring
UpperCAmelCase__ : Tuple = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[Any] = "tabby, tabby cat"
UpperCAmelCase__ : int = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
def __init__( self : str , __magic_name__ : int , __magic_name__ : int = 3 , __magic_name__ : int = 1 , __magic_name__ : int = 1 , __magic_name__ : Optional[str] = "relu" , **__magic_name__ : int , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowerCAmelCase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=__magic_name__ , strides=__magic_name__ , padding="VALID" , groups=__magic_name__ , use_bias=__magic_name__ , name="convolution" , )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
lowerCAmelCase__ = ACTaFN[activation] if activation is not None else tf.identity
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.convolution(self.padding(__magic_name__ ) )
lowerCAmelCase__ = self.normalization(__magic_name__ )
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , __magic_name__ : RegNetConfig , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config.num_channels
lowerCAmelCase__ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = shape_list(__magic_name__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 2, 3, 1) )
lowerCAmelCase__ = self.embedder(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Any , __magic_name__ : int , __magic_name__ : int = 2 , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=1 , strides=__magic_name__ , use_bias=__magic_name__ , name="convolution" )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : bool = False ):
"""simple docstring"""
return self.normalization(self.convolution(__magic_name__ ) , training=__magic_name__ )
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
lowerCAmelCase__ = [
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.pooler(__magic_name__ )
for layer_module in self.attention:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = hidden_state * pooled
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.2" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(__magic_name__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.3" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 , __magic_name__ : int = 2 , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowerCAmelCase__ = [
# downsampling is done in the first layer with stride of 2
layer(__magic_name__ , __magic_name__ , __magic_name__ , stride=__magic_name__ , name="layers.0" ),
*[layer(__magic_name__ , __magic_name__ , __magic_name__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] ):
"""simple docstring"""
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowerCAmelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__magic_name__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ , name=f"""stages.{i+1}""" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : tf.Tensor , __magic_name__ : bool = False , __magic_name__ : bool = True ):
"""simple docstring"""
lowerCAmelCase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
lowerCAmelCase__ = stage_module(__magic_name__ )
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__magic_name__ , hidden_states=__magic_name__ )
@keras_serializable
class A ( tf.keras.layers.Layer ):
snake_case__ :List[Any] = RegNetConfig
def __init__( self : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config
lowerCAmelCase__ = TFRegNetEmbeddings(__magic_name__ , name="embedder" )
lowerCAmelCase__ = TFRegNetEncoder(__magic_name__ , name="encoder" )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
@unpack_inputs
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.embedder(__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = self.encoder(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = encoder_outputs[0]
lowerCAmelCase__ = self.pooler(__magic_name__ )
# Change to NCHW output format have uniformity in the modules
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCAmelCase__ = tuple([tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :str = RegNetConfig
snake_case__ :Optional[Any] = 'regnet'
snake_case__ :Tuple = 'pixel_values'
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
UpperCAmelCase__ : List[str] = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ : Tuple = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Any , __magic_name__ : RegNetConfig , *__magic_name__ : Optional[int] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : int=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
pixel_values=__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , *__magic_name__ : Tuple , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = config.num_labels
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
# classification head
lowerCAmelCase__ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : tf.Tensor = None , __magic_name__ : tf.Tensor = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ = self.classifier[0](__magic_name__ )
lowerCAmelCase__ = self.classifier[1](__magic_name__ )
lowerCAmelCase__ = None if labels is None else self.hf_compute_loss(labels=__magic_name__ , logits=__magic_name__ )
if not return_dict:
lowerCAmelCase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states )
| 48 | 0 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('3.8'):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
UpperCamelCase__ = ''
if version.parse(importlib_metadata.version('jiwer')) < version.parse('2.3.0'):
class a ( tr.AbstractTransform ):
def __init__( self , UpperCamelCase_ = " " ):
UpperCAmelCase__ : Union[str, Any] = sentence_delimiter
def __snake_case ( self , UpperCamelCase_ ):
return list(UpperCamelCase_ )
def __snake_case ( self , UpperCamelCase_ ):
UpperCAmelCase__ : Tuple = []
for sent_idx, sentence in enumerate(UpperCamelCase_ ):
chars.extend(self.process_string(UpperCamelCase_ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(UpperCamelCase_ ) - 1:
chars.append(self.sentence_delimiter )
return chars
UpperCamelCase__ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
UpperCamelCase__ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
UpperCamelCase__ = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
UpperCamelCase__ = '\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n'
UpperCamelCase__ = '\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcribtions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = ["this is the prediction", "there is an other sample"]\n >>> references = ["this is the reference", "there is another one"]\n >>> cer = datasets.load_metric("cer")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def __snake_case ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/jitsi/jiwer/'] , reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
'https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates',
] , )
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=False ):
if concatenate_texts:
return jiwer.compute_measures(
UpperCamelCase_ , UpperCamelCase_ , truth_transform=UpperCamelCase_ , hypothesis_transform=UpperCamelCase_ , )["wer"]
UpperCAmelCase__ : Union[str, Any] = 0
UpperCAmelCase__ : Tuple = 0
for prediction, reference in zip(UpperCamelCase_ , UpperCamelCase_ ):
UpperCAmelCase__ : Union[str, Any] = jiwer.compute_measures(
UpperCamelCase_ , UpperCamelCase_ , truth_transform=UpperCamelCase_ , hypothesis_transform=UpperCamelCase_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
| 110 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def A ( UpperCamelCase_ : Tuple ) -> int:
'''simple docstring'''
for param in module.parameters():
lowerCAmelCase__ = False
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowerCAmelCase__ = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = plt.imshow(UpperCamelCase_ )
fig.axes.get_xaxis().set_visible(UpperCamelCase_ )
fig.axes.get_yaxis().set_visible(UpperCamelCase_ )
plt.show()
def A ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = datetime.now()
lowerCAmelCase__ = current_time.strftime("%H:%M:%S" )
return timestamp
| 48 | 0 |
def lowerCAmelCase__ ( ) -> int:
'''simple docstring'''
A__ = []
A__ = 1
while len(UpperCamelCase_ ) < 1e6:
constant.append(str(UpperCamelCase_ ) )
i += 1
A__ = "".join(UpperCamelCase_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[9_9] )
* int(constant[9_9_9] )
* int(constant[9_9_9_9] )
* int(constant[9_9_9_9_9] )
* int(constant[9_9_9_9_9_9] )
)
if __name__ == "__main__":
print(solution())
| 514 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase__ : List[Any] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
UpperCAmelCase__ = R"\n [`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and\n can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.\n\n Args:\n title_sep (`str`, *optional*, defaults to `\" / \"`):\n Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].\n doc_sep (`str`, *optional*, defaults to `\" // \"`):\n Separator inserted between the text of the retrieved document and the original input when calling\n [`RagRetriever`].\n n_docs (`int`, *optional*, defaults to 5):\n Number of documents to retrieve.\n max_combined_length (`int`, *optional*, defaults to 300):\n Max length of contextualized input returned by [`~RagRetriever.__call__`].\n retrieval_vector_size (`int`, *optional*, defaults to 768):\n Dimensionality of the document embeddings indexed by [`RagRetriever`].\n retrieval_batch_size (`int`, *optional*, defaults to 8):\n Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated\n [`RagRetriever`].\n dataset (`str`, *optional*, defaults to `\"wiki_dpr\"`):\n A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids\n using `datasets.list_datasets()`).\n dataset_split (`str`, *optional*, defaults to `\"train\"`)\n Which split of the `dataset` to load.\n index_name (`str`, *optional*, defaults to `\"compressed\"`)\n The index name of the index associated with the `dataset`. One can choose between `\"legacy\"`, `\"exact\"` and\n `\"compressed\"`.\n index_path (`str`, *optional*)\n The path to the serialized faiss index on disk.\n passages_path (`str`, *optional*):\n A path to text passages compatible with the faiss index. Required if using\n [`~models.rag.retrieval_rag.LegacyIndex`]\n use_dummy_dataset (`bool`, *optional*, defaults to `False`)\n Whether to load a \"dummy\" variant of the dataset specified by `dataset`.\n label_smoothing (`float`, *optional*, defaults to 0.0):\n Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing\n in the loss calculation. If set to 0, no label smoothing is performed.\n do_marginalize (`bool`, *optional*, defaults to `False`):\n If `True`, the logits are marginalized over all documents by making use of\n `torch.nn.functional.log_softmax`.\n reduce_loss (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.\n do_deduplication (`bool`, *optional*, defaults to `True`):\n Whether or not to deduplicate the generations from different context documents for a given input. Has to be\n set to `False` if used while training with distributed backend.\n exclude_bos_score (`bool`, *optional*, defaults to `False`):\n Whether or not to disregard the BOS token when computing the loss.\n output_retrieved(`bool`, *optional*, defaults to `False`):\n If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and\n `context_attention_mask` are returned. See returned tensors for more detail.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models).\n forced_eos_token_id (`int`, *optional*):\n The id of the token to force as the last generated token when `max_length` is reached. Usually set to\n `eos_token_id`.\n"
@add_start_docstrings(SCREAMING_SNAKE_CASE__ )
class a__ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
A : Union[str, Any] = 'rag'
A : int = True
def __init__( self : Dict , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Tuple=True , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Optional[int]=None , lowerCAmelCase_ : Union[str, Any]=None , lowerCAmelCase_ : int=None , lowerCAmelCase_ : Dict=" / " , lowerCAmelCase_ : Optional[Any]=" // " , lowerCAmelCase_ : Dict=5 , lowerCAmelCase_ : Any=300 , lowerCAmelCase_ : int=768 , lowerCAmelCase_ : Any=8 , lowerCAmelCase_ : Optional[int]="wiki_dpr" , lowerCAmelCase_ : Union[str, Any]="train" , lowerCAmelCase_ : str="compressed" , lowerCAmelCase_ : List[str]=None , lowerCAmelCase_ : List[Any]=None , lowerCAmelCase_ : int=False , lowerCAmelCase_ : Dict=False , lowerCAmelCase_ : Optional[int]=0.0 , lowerCAmelCase_ : List[Any]=True , lowerCAmelCase_ : Optional[Any]=False , lowerCAmelCase_ : Union[str, Any]=False , lowerCAmelCase_ : Dict=False , lowerCAmelCase_ : List[str]=True , lowerCAmelCase_ : Dict=None , **lowerCAmelCase_ : List[str] , ) -> Tuple:
super().__init__(
bos_token_id=lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ , decoder_start_token_id=lowerCAmelCase_ , forced_eos_token_id=lowerCAmelCase_ , is_encoder_decoder=lowerCAmelCase_ , prefix=lowerCAmelCase_ , vocab_size=lowerCAmelCase_ , **lowerCAmelCase_ , )
assert (
"question_encoder" in kwargs and "generator" in kwargs
), "Config has to be initialized with question_encoder and generator config"
__A= kwargs.pop('question_encoder' )
__A= question_encoder_config.pop('model_type' )
__A= kwargs.pop('generator' )
__A= decoder_config.pop('model_type' )
from ..auto.configuration_auto import AutoConfig
__A= AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ )
__A= AutoConfig.for_model(lowerCAmelCase_ , **lowerCAmelCase_ )
__A= reduce_loss
__A= label_smoothing
__A= exclude_bos_score
__A= do_marginalize
__A= title_sep
__A= doc_sep
__A= n_docs
__A= max_combined_length
__A= dataset
__A= dataset_split
__A= index_name
__A= retrieval_vector_size
__A= retrieval_batch_size
__A= passages_path
__A= index_path
__A= use_dummy_dataset
__A= output_retrieved
__A= do_deduplication
__A= use_cache
if self.forced_eos_token_id is None:
__A= getattr(self.generator , 'forced_eos_token_id' , lowerCAmelCase_ )
@classmethod
def lowerCAmelCase ( cls : Tuple , lowerCAmelCase_ : PretrainedConfig , lowerCAmelCase_ : PretrainedConfig , **lowerCAmelCase_ : Optional[Any] ) -> Dict:
return cls(question_encoder=question_encoder_config.to_dict() , generator=generator_config.to_dict() , **lowerCAmelCase_ )
def lowerCAmelCase ( self : Optional[int] ) -> List[str]:
__A= copy.deepcopy(self.__dict__ )
__A= self.question_encoder.to_dict()
__A= self.generator.to_dict()
__A= self.__class__.model_type
return output
| 186 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : int ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = BigBirdConfig.from_json_file(UpperCamelCase_ )
print(F"""Building PyTorch model from configuration: {config}""" )
if is_trivia_qa:
lowerCAmelCase__ = BigBirdForQuestionAnswering(UpperCamelCase_ )
else:
lowerCAmelCase__ = BigBirdForPreTraining(UpperCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(UpperCamelCase_ , UpperCamelCase_ , is_trivia_qa=UpperCamelCase_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
UpperCAmelCase__ : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 48 | 0 |
'''simple docstring'''
lowerCamelCase :str = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
lowerCamelCase :Any = ["a", "b", "c", "d", "e"]
def a ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = start
# add current to visited
visited.append(UpperCamelCase_ )
A_ : Tuple = edges[current]
for neighbor in neighbors:
# if neighbor not in visited, visit
if neighbor not in visited:
A_ : Optional[Any] = topological_sort(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# if all neighbors visited add current to sort
sort.append(UpperCamelCase_ )
# if all vertices haven't been visited select a new one to visit
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
for vertice in vertices:
if vertice not in visited:
A_ : Union[str, Any] = topological_sort(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# return sort
return sort
if __name__ == "__main__":
lowerCamelCase :Dict = topological_sort('''a''', [], [])
print(sort)
| 667 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class A :
def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=13 , __magic_name__ : List[str]=7 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : int=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Dict=1 , __magic_name__ : Tuple=32 , __magic_name__ : Any=4 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=512 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = block_sizes
lowerCAmelCase__ = num_decoder_layers
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_head
lowerCAmelCase__ = d_head
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = 2
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase__ = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase__ = self.num_hidden_layers + 2
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForPreTraining(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForMaskedLM(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForSequenceClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFFunnelForMultipleChoice(config=__magic_name__ )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForTokenClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,
) = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ :Any = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ :str = False
snake_case__ :Any = False
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case__ :int = False
snake_case__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self , base=__magic_name__ )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
| 48 | 0 |
from itertools import permutations
def snake_case__ ( lowerCamelCase_ ):
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
A : List[str] = [7, 11, 13, 17]
for i, test in enumerate(UpperCamelCase_ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def snake_case__ ( lowerCamelCase_ = 10 ):
return sum(
int(''''''.join(map(UpperCamelCase_ , UpperCamelCase_ ) ) )
for num in permutations(range(UpperCamelCase_ ) )
if is_substring_divisible(UpperCamelCase_ ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 542 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'umt5'
snake_case__ :Any = ['past_key_values']
def __init__( self : List[Any] , __magic_name__ : Tuple=250112 , __magic_name__ : str=512 , __magic_name__ : int=64 , __magic_name__ : str=1024 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=6 , __magic_name__ : Dict=32 , __magic_name__ : Optional[Any]=128 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=1E-6 , __magic_name__ : Optional[int]=1.0 , __magic_name__ : Dict="gated-gelu" , __magic_name__ : List[str]=True , __magic_name__ : Tuple=True , __magic_name__ : Optional[int]="T5Tokenizer" , __magic_name__ : str=True , __magic_name__ : int=0 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : str=0 , **__magic_name__ : Any , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=__magic_name__ , tokenizer_class=__magic_name__ , tie_word_embeddings=__magic_name__ , pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = d_model
lowerCAmelCase__ = d_kv
lowerCAmelCase__ = d_ff
lowerCAmelCase__ = num_layers
lowerCAmelCase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = relative_attention_num_buckets
lowerCAmelCase__ = relative_attention_max_distance
lowerCAmelCase__ = dropout_rate
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_factor
lowerCAmelCase__ = feed_forward_proj
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = self.feed_forward_proj.split("-" )
lowerCAmelCase__ = act_info[-1]
lowerCAmelCase__ = act_info[0] == "gated"
if len(__magic_name__ ) > 1 and act_info[0] != "gated" or len(__magic_name__ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
lowerCAmelCase__ = "gelu_new"
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return self.d_model
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return self.num_heads
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.num_layers
class A ( SCREAMING_SNAKE_CASE__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
lowerCAmelCase__ = "past_encoder_sequence + sequence"
lowerCAmelCase__ = {0: "batch"}
lowerCAmelCase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 13
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 5E-4
| 48 | 0 |
'''simple docstring'''
import argparse
import copy
def _SCREAMING_SNAKE_CASE ( __snake_case : str ):
_A = {}
with open(UpperCamelCase_ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
_A = []
_list.append([line.split()[1], line.split()[2]] )
_A = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
_A = []
_list.append([line.split()[0], line.split()[2]] )
_A = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[Any] , __snake_case : str ):
with open(UpperCamelCase_ ) as f:
_A = f.read(1 )
_A = start_node
_A = []
_A = start_node
_A = 0
while visiting not in first_solution:
_A = 1_0_0_0_0
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(UpperCamelCase_ ) and k[0] not in first_solution:
_A = k[1]
_A = k[0]
first_solution.append(UpperCamelCase_ )
_A = distance_of_first_solution + int(UpperCamelCase_ )
_A = best_node
first_solution.append(UpperCamelCase_ )
_A = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
_A = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 1_0_0_0_0
)
return first_solution, distance_of_first_solution
def _SCREAMING_SNAKE_CASE ( __snake_case : str , __snake_case : Dict ):
_A = []
for n in solution[1:-1]:
_A = solution.index(UpperCamelCase_ )
for kn in solution[1:-1]:
_A = solution.index(UpperCamelCase_ )
if n == kn:
continue
_A = copy.deepcopy(UpperCamelCase_ )
_A = kn
_A = n
_A = 0
for k in _tmp[:-1]:
_A = _tmp[_tmp.index(UpperCamelCase_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
_A = distance + int(i[1] )
_tmp.append(UpperCamelCase_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
_A = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda __snake_case : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def _SCREAMING_SNAKE_CASE ( __snake_case : Dict , __snake_case : Any , __snake_case : str , __snake_case : Dict , __snake_case : Tuple ):
_A = 1
_A = first_solution
_A = []
_A = distance_of_first_solution
_A = solution
while count <= iters:
_A = find_neighborhood(UpperCamelCase_ , UpperCamelCase_ )
_A = 0
_A = neighborhood[index_of_best_solution]
_A = len(UpperCamelCase_ ) - 1
_A = False
while not found:
_A = 0
while i < len(UpperCamelCase_ ):
if best_solution[i] != solution[i]:
_A = best_solution[i]
_A = solution[i]
break
_A = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
_A = True
_A = best_solution[:-1]
_A = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
_A = cost
_A = solution
else:
_A = index_of_best_solution + 1
_A = neighborhood[index_of_best_solution]
if len(UpperCamelCase_ ) >= size:
tabu_list.pop(0 )
_A = count + 1
return best_solution_ever, best_cost
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[int]=None ):
_A = generate_neighbours(args.File )
_A , _A = generate_first_solution(
args.File , UpperCamelCase_ )
_A , _A = tabu_search(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , args.Iterations , args.Size , )
print(F'Best solution: {best_sol}, with total distance: {best_cost}.' )
if __name__ == "__main__":
_UpperCAmelCase : Optional[Any] = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 107 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class A :
def __init__( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = {}
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = {}
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : str , __magic_name__ : float ):
"""simple docstring"""
if nodea not in self.connections:
self.add_node(__magic_name__ )
if nodea not in self.connections:
self.add_node(__magic_name__ )
lowerCAmelCase__ = probability
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return list(self.connections )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def A ( UpperCamelCase_ : str , UpperCamelCase_ : list[tuple[str, str, float]] , UpperCamelCase_ : int ) -> dict[str, int]:
'''simple docstring'''
lowerCAmelCase__ = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = Counter(graph.get_nodes() )
lowerCAmelCase__ = start
for _ in range(UpperCamelCase_ ):
lowerCAmelCase__ = graph.transition(UpperCamelCase_ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 0 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def _a ( UpperCAmelCase ) -> List[Any]: # picklable for multiprocessing
"""simple docstring"""
return x.sum()
def _a ( UpperCAmelCase ) -> int: # picklable for multiprocessing
"""simple docstring"""
return i + 1
@dataclass
class __SCREAMING_SNAKE_CASE :
_UpperCAmelCase : int
_UpperCAmelCase : str
class __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
def __lowerCamelCase ( self : Dict ) ->List[str]:
lowerCamelCase__ : Dict = {}
lowerCamelCase__ : Optional[Any] = []
lowerCamelCase__ : Any = 1
lowerCamelCase__ : Any = [1, 2]
lowerCamelCase__ : List[Any] = {'''a''': 1, '''b''': 2}
lowerCamelCase__ : Any = {'''a''': [1, 2], '''b''': [3, 4]}
lowerCamelCase__ : Union[str, Any] = {'''a''': {'''1''': 1}, '''b''': 2}
lowerCamelCase__ : List[str] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4}
lowerCamelCase__ : str = {}
lowerCamelCase__ : Any = []
lowerCamelCase__ : Optional[Any] = 2
lowerCamelCase__ : List[Any] = [2, 3]
lowerCamelCase__ : str = {'''a''': 2, '''b''': 3}
lowerCamelCase__ : List[str] = {'''a''': [2, 3], '''b''': [4, 5]}
lowerCamelCase__ : Union[str, Any] = {'''a''': {'''1''': 2}, '''b''': 3}
lowerCamelCase__ : Any = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5}
self.assertEqual(map_nested(A , A ) , A )
self.assertEqual(map_nested(A , A ) , A )
self.assertEqual(map_nested(A , A ) , A )
self.assertEqual(map_nested(A , A ) , A )
self.assertEqual(map_nested(A , A ) , A )
self.assertEqual(map_nested(A , A ) , A )
self.assertEqual(map_nested(A , A ) , A )
self.assertEqual(map_nested(A , A ) , A )
lowerCamelCase__ : Dict = 2
self.assertEqual(map_nested(A , A , num_proc=A ) , A )
self.assertEqual(map_nested(A , A , num_proc=A ) , A )
self.assertEqual(map_nested(A , A , num_proc=A ) , A )
self.assertEqual(map_nested(A , A , num_proc=A ) , A )
self.assertEqual(map_nested(A , A , num_proc=A ) , A )
self.assertEqual(map_nested(A , A , num_proc=A ) , A )
self.assertEqual(map_nested(A , A , num_proc=A ) , A )
self.assertEqual(map_nested(A , A , num_proc=A ) , A )
lowerCamelCase__ : int = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )}
lowerCamelCase__ : List[Any] = {'''a''': 2, '''b''': 0, '''c''': 2}
lowerCamelCase__ : List[Any] = {
'''a''': np.eye(2 ).astype(A ),
'''b''': np.zeros(3 ).astype(A ),
'''c''': np.ones(2 ).astype(A ),
}
self.assertEqual(map_nested(A , A , map_numpy=A ) , A )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A , A , map_numpy=A ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
self.assertEqual(map_nested(A , A , map_numpy=A , num_proc=A ) , A )
self.assertEqual(
{k: v.tolist() for k, v in map_nested(A , A , map_numpy=A , num_proc=A ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , )
with self.assertRaises(A ): # can't pickle a local lambda
map_nested(lambda A : x + 1 , A , num_proc=A )
def __lowerCamelCase ( self : Optional[int] ) ->Optional[Any]:
lowerCamelCase__ : int = {'''a''': 1, '''b''': 2}
lowerCamelCase__ : List[str] = {'''a''': 3, '''b''': 4}
lowerCamelCase__ : Optional[int] = {'''a''': 5, '''b''': 6}
lowerCamelCase__ : str = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] )
self.assertEqual(sorted(zip_dict(A , A , A ) ) , A )
def __lowerCamelCase ( self : Optional[Any] ) ->Optional[int]:
class __SCREAMING_SNAKE_CASE :
_UpperCAmelCase : Union[str, Any] = 'bar'
lowerCamelCase__ : str = Foo()
self.assertEqual(foo.my_attr , '''bar''' )
with temporary_assignment(A , '''my_attr''' , '''BAR''' ):
self.assertEqual(foo.my_attr , '''BAR''' )
self.assertEqual(foo.my_attr , '''bar''' )
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict:
"""simple docstring"""
with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch(
'''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool:
lowerCamelCase__ : int = {f"{i}": i for i in range(UpperCamelCase_ )}
lowerCamelCase__ : Dict = map_nested(lambda UpperCAmelCase : x + 10 , UpperCamelCase_ , num_proc=UpperCamelCase_ , parallel_min_length=16 )
if expected_num_proc == 1:
assert mock_single_map_nested.called
assert not mock_multiprocessing_pool.called
else:
assert not mock_single_map_nested.called
assert mock_multiprocessing_pool.called
assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
@require_tf
def __lowerCamelCase ( self : List[str] ) ->str:
import tensorflow as tf
from tensorflow.keras import layers
lowerCamelCase__ : Optional[Any] = layers.Dense(2 )
def gen_random_output():
lowerCamelCase__ : Tuple = tf.random.uniform((1, 3) )
return model(A ).numpy()
with temp_seed(4_2 , set_tensorflow=A ):
lowerCamelCase__ : Optional[int] = gen_random_output()
with temp_seed(4_2 , set_tensorflow=A ):
lowerCamelCase__ : List[str] = gen_random_output()
lowerCamelCase__ : Optional[Any] = gen_random_output()
np.testing.assert_equal(A , A )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@require_torch
def __lowerCamelCase ( self : int ) ->Union[str, Any]:
import torch
def gen_random_output():
lowerCamelCase__ : Any = torch.nn.Linear(3 , 2 )
lowerCamelCase__ : Optional[int] = torch.rand(1 , 3 )
return model(A ).detach().numpy()
with temp_seed(4_2 , set_pytorch=A ):
lowerCamelCase__ : int = gen_random_output()
with temp_seed(4_2 , set_pytorch=A ):
lowerCamelCase__ : List[str] = gen_random_output()
lowerCamelCase__ : Optional[int] = gen_random_output()
np.testing.assert_equal(A , A )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
def __lowerCamelCase ( self : Optional[Any] ) ->List[Any]:
def gen_random_output():
return np.random.rand(1 , 3 )
with temp_seed(4_2 ):
lowerCamelCase__ : Optional[Any] = gen_random_output()
with temp_seed(4_2 ):
lowerCamelCase__ : Any = gen_random_output()
lowerCamelCase__ : Union[str, Any] = gen_random_output()
np.testing.assert_equal(A , A )
self.assertGreater(np.abs(outa - outa ).sum() , 0 )
@pytest.mark.parametrize('''input_data''' , [{}] )
def _a ( UpperCAmelCase ) -> str:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = NestedDataStructure(UpperCamelCase_ ).data
assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def _a ( UpperCAmelCase , UpperCAmelCase ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : str = NestedDataStructure(UpperCamelCase_ ).flatten()
assert output == expected_output
def _a ( ) -> Dict:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = A(x=1 , y='''foobar''' )
lowerCamelCase__ : Any = {'''x''': 1, '''y''': '''foobar'''}
assert asdict(UpperCamelCase_ ) == expected_output
lowerCamelCase__ : Tuple = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]}
lowerCamelCase__ : Any = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]}
assert asdict(UpperCamelCase_ ) == expected_output
with pytest.raises(UpperCamelCase_ ):
asdict([1, A(x=10 , y='''foo''' )] )
def _a ( UpperCAmelCase ) -> Dict:
"""simple docstring"""
return text.split()
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
yield (time.time(), content)
time.sleep(2 )
yield (time.time(), content)
def _a ( ) -> Optional[int]:
"""simple docstring"""
with Pool(2 ) as pool:
lowerCamelCase__ : Any = list(iflatmap_unordered(UpperCamelCase_ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(UpperCamelCase_ ) == 20
# check multiprocess from pathos (uses dill for pickling)
with multiprocess.Pool(2 ) as pool:
lowerCamelCase__ : str = list(iflatmap_unordered(UpperCamelCase_ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) )
assert out.count('''hello''' ) == 10
assert out.count('''there''' ) == 10
assert len(UpperCamelCase_ ) == 20
# check that we get items as fast as possible
with Pool(2 ) as pool:
lowerCamelCase__ : List[Any] = []
for yield_time, content in iflatmap_unordered(
UpperCamelCase_ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ):
assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded"
out.append(UpperCamelCase_ )
assert out.count('''a''' ) == 2
assert out.count('''b''' ) == 2
assert len(UpperCamelCase_ ) == 4
| 315 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase__ : Optional[Any] = pytest.mark.integration
UpperCAmelCase__ : str = {"comet"}
UpperCAmelCase__ : Optional[Any] = importlib.util.find_spec("fairseq") is not None
UpperCAmelCase__ : Optional[int] = {"code_eval"}
UpperCAmelCase__ : List[Any] = os.name == "nt"
UpperCAmelCase__ : Optional[int] = {"bertscore", "frugalscore", "perplexity"}
UpperCAmelCase__ : int = importlib.util.find_spec("transformers") is not None
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[Any] , UpperCamelCase_ : List[str] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : int ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : Any ) -> int:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : Optional[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@local
class A ( parameterized.TestCase ):
snake_case__ :Union[str, Any] = {}
snake_case__ :Optional[Any] = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__magic_name__ )
# check parameters
lowerCAmelCase__ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__magic_name__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__ ):
yield
else:
yield
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
def load_local_metric(__magic_name__ : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : Any ):
return load_metric(os.path.join("metrics" , __magic_name__ ) , *__magic_name__ , **__magic_name__ )
with patch("datasets.load_metric" ) as mock_load_metric:
lowerCAmelCase__ = load_local_metric
yield
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : Optional[int] ):
"""simple docstring"""
def wrapper(__magic_name__ : Dict ):
lowerCAmelCase__ = contextmanager(__magic_name__ )
lowerCAmelCase__ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def A ( UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class A ( SCREAMING_SNAKE_CASE__ ):
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
lowerCAmelCase__ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def A ( UpperCamelCase_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
import torch
def bert_cos_score_idf(UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
lowerCAmelCase__ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def A ( UpperCamelCase_ : Optional[int] ) -> Any:
'''simple docstring'''
def load_from_checkpoint(UpperCamelCase_ : Tuple ):
class A :
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ):
"""simple docstring"""
assert len(__magic_name__ ) == 2
lowerCAmelCase__ = [0.19, 0.92]
return scores, sum(__magic_name__ ) / len(__magic_name__ )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
lowerCAmelCase__ = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
lowerCAmelCase__ = load_from_checkpoint
yield
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = load_metric(os.path.join("metrics" , "seqeval" ) )
lowerCAmelCase__ = "ERROR"
lowerCAmelCase__ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(UpperCamelCase_ , match=re.escape(UpperCamelCase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase_ )
| 48 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : List[str] = {
"configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : Optional[Any] = [
"LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
"LiltForQuestionAnswering",
"LiltForSequenceClassification",
"LiltForTokenClassification",
"LiltModel",
"LiltPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
snake_case__ : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 408 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase__ : int = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'facebook/nllb-200-distilled-600M'
snake_case__ :Optional[Any] = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
snake_case__ :List[Any] = 'translator'
snake_case__ :List[Any] = AutoTokenizer
snake_case__ :Optional[Any] = AutoModelForSeqaSeqLM
snake_case__ :List[str] = LANGUAGE_CODES
snake_case__ :List[Any] = ['text', 'text', 'text']
snake_case__ :List[Any] = ['text']
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
lowerCAmelCase__ = self.lang_to_code[src_lang]
lowerCAmelCase__ = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__magic_name__ , return_tensors="pt" , src_lang=__magic_name__ , tgt_lang=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__magic_name__ )
| 48 | 0 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class snake_case_ :
'''simple docstring'''
def __init__( self : Dict , __magic_name__ : Collection[float] | None = None ) -> Dict:
if components is None:
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : List[Any] = list(__magic_name__ )
def __len__( self : Optional[Any] ) -> Dict:
return len(self.__components )
def __str__( self : Optional[Any] ) -> Optional[Any]:
return "(" + ",".join(map(__magic_name__ , self.__components ) ) + ")"
def __add__( self : Dict , __magic_name__ : Vector ) -> Dict:
lowerCamelCase_ : List[Any] = len(self )
if size == len(__magic_name__ ):
lowerCamelCase_ : Union[str, Any] = [self.__components[i] + other.component(__magic_name__ ) for i in range(__magic_name__ )]
return Vector(__magic_name__ )
else:
raise Exception("must have the same size" )
def __sub__( self : Tuple , __magic_name__ : Vector ) -> Union[str, Any]:
lowerCamelCase_ : Optional[int] = len(self )
if size == len(__magic_name__ ):
lowerCamelCase_ : List[str] = [self.__components[i] - other.component(__magic_name__ ) for i in range(__magic_name__ )]
return Vector(__magic_name__ )
else: # error case
raise Exception("must have the same size" )
@overload
def __mul__( self : Optional[Any] , __magic_name__ : float ) -> Optional[int]:
...
@overload
def __mul__( self : Union[str, Any] , __magic_name__ : Vector ) -> Optional[Any]:
...
def __mul__( self : Union[str, Any] , __magic_name__ : float | Vector ) -> Union[str, Any]:
if isinstance(__magic_name__ , (float, int) ):
lowerCamelCase_ : Optional[int] = [c * other for c in self.__components]
return Vector(__magic_name__ )
elif isinstance(__magic_name__ , __magic_name__ ) and len(self ) == len(__magic_name__ ):
lowerCamelCase_ : List[Any] = len(self )
lowerCamelCase_ : str = [self.__components[i] * other.component(__magic_name__ ) for i in range(__magic_name__ )]
return sum(__magic_name__ )
else: # error case
raise Exception("invalid operand!" )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
return Vector(self.__components )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : int ) -> Union[str, Any]:
if isinstance(__magic_name__ , __magic_name__ ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("index out of range" )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : int , __magic_name__ : float ) -> Union[str, Any]:
assert -len(self.__components ) <= pos < len(self.__components )
lowerCamelCase_ : Union[str, Any] = value
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
if len(self.__components ) == 0:
raise Exception("Vector is empty" )
lowerCamelCase_ : List[str] = [c**2 for c in self.__components]
return math.sqrt(sum(__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Vector , __magic_name__ : bool = False ) -> Optional[int]:
lowerCamelCase_ : Optional[Any] = self * other
lowerCamelCase_ : Tuple = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def __a ( __UpperCAmelCase : int ) -> Vector:
"""simple docstring"""
assert isinstance(UpperCamelCase_ , UpperCamelCase_ )
return Vector([0] * dimension )
def __a ( __UpperCAmelCase : int , __UpperCAmelCase : int ) -> Vector:
"""simple docstring"""
assert isinstance(UpperCamelCase_ , UpperCamelCase_ ) and (isinstance(UpperCamelCase_ , UpperCamelCase_ ))
lowerCamelCase_ : Union[str, Any] = [0] * dimension
lowerCamelCase_ : Optional[int] = 1
return Vector(UpperCamelCase_ )
def __a ( __UpperCAmelCase : float , __UpperCAmelCase : Vector , __UpperCAmelCase : Vector ) -> Vector:
"""simple docstring"""
assert (
isinstance(UpperCamelCase_ , UpperCamelCase_ )
and isinstance(UpperCamelCase_ , UpperCamelCase_ )
and (isinstance(UpperCamelCase_ , (int, float) ))
)
return x * scalar + y
def __a ( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> Vector:
"""simple docstring"""
random.seed(UpperCamelCase_ )
lowerCamelCase_ : List[Any] = [random.randint(UpperCamelCase_ , UpperCamelCase_ ) for _ in range(UpperCamelCase_ )]
return Vector(UpperCamelCase_ )
class snake_case_ :
'''simple docstring'''
def __init__( self : Dict , __magic_name__ : list[list[float]] , __magic_name__ : int , __magic_name__ : int ) -> int:
lowerCamelCase_ : int = matrix
lowerCamelCase_ : str = w
lowerCamelCase_ : Optional[int] = h
def __str__( self : str ) -> Tuple:
lowerCamelCase_ : str = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : int , __magic_name__ : Matrix ) -> Dict:
if self.__width == other.width() and self.__height == other.height():
lowerCamelCase_ : List[Any] = []
for i in range(self.__height ):
lowerCamelCase_ : Any = [
self.__matrix[i][j] + other.component(__magic_name__ , __magic_name__ )
for j in range(self.__width )
]
matrix.append(__magic_name__ )
return Matrix(__magic_name__ , self.__width , self.__height )
else:
raise Exception("matrix must have the same dimension!" )
def __sub__( self : Dict , __magic_name__ : Matrix ) -> str:
if self.__width == other.width() and self.__height == other.height():
lowerCamelCase_ : Union[str, Any] = []
for i in range(self.__height ):
lowerCamelCase_ : Tuple = [
self.__matrix[i][j] - other.component(__magic_name__ , __magic_name__ )
for j in range(self.__width )
]
matrix.append(__magic_name__ )
return Matrix(__magic_name__ , self.__width , self.__height )
else:
raise Exception("matrices must have the same dimension!" )
@overload
def __mul__( self : Optional[Any] , __magic_name__ : float ) -> Tuple:
...
@overload
def __mul__( self : Optional[Any] , __magic_name__ : Vector ) -> Union[str, Any]:
...
def __mul__( self : Optional[int] , __magic_name__ : float | Vector ) -> str:
if isinstance(__magic_name__ , __magic_name__ ): # matrix-vector
if len(__magic_name__ ) == self.__width:
lowerCamelCase_ : List[Any] = zero_vector(self.__height )
for i in range(self.__height ):
lowerCamelCase_ : int = [
self.__matrix[i][j] * other.component(__magic_name__ )
for j in range(self.__width )
]
ans.change_component(__magic_name__ , sum(__magic_name__ ) )
return ans
else:
raise Exception(
"vector must have the same size as the "
"number of columns of the matrix!" )
elif isinstance(__magic_name__ , (int, float) ): # matrix-scalar
lowerCamelCase_ : Any = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(__magic_name__ , self.__width , self.__height )
return None
def __SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
return self.__height
def __SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
return self.__width
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : int , __magic_name__ : int ) -> Optional[Any]:
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("change_component: indices out of bounds" )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : int , __magic_name__ : int , __magic_name__ : float ) -> Dict:
if 0 <= x < self.__height and 0 <= y < self.__width:
lowerCamelCase_ : List[str] = value
else:
raise Exception("change_component: indices out of bounds" )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : int , __magic_name__ : int ) -> List[Any]:
if self.__height != self.__width:
raise Exception("Matrix is not square" )
lowerCamelCase_ : int = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(__magic_name__ ) ):
lowerCamelCase_ : Optional[int] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(__magic_name__ , self.__width - 1 , self.__height - 1 ).determinant()
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : int , __magic_name__ : int ) -> int:
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(__magic_name__ , __magic_name__ )
else:
raise Exception("Indices out of bounds" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if self.__height < 1:
raise Exception("Matrix has no element" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
lowerCamelCase_ : int = [
self.__matrix[0][y] * self.cofactor(0 , __magic_name__ ) for y in range(self.__width )
]
return sum(__magic_name__ )
def __a ( __UpperCAmelCase : int ) -> Matrix:
"""simple docstring"""
lowerCamelCase_ : Union[str, Any] = [[0] * n for _ in range(UpperCamelCase_ )]
return Matrix(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
def __a ( __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int , __UpperCAmelCase : int ) -> Matrix:
"""simple docstring"""
random.seed(UpperCamelCase_ )
lowerCamelCase_ : Union[str, Any] = [
[random.randint(UpperCamelCase_ , UpperCamelCase_ ) for _ in range(UpperCamelCase_ )] for _ in range(UpperCamelCase_ )
]
return Matrix(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
| 488 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'timm_backbone'
def __init__( self : Tuple , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=3 , __magic_name__ : Dict=True , __magic_name__ : str=True , __magic_name__ : List[Any]=None , **__magic_name__ : Tuple , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = backbone
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = features_only
lowerCAmelCase__ = use_pretrained_backbone
lowerCAmelCase__ = True
lowerCAmelCase__ = out_indices if out_indices is not None else (-1,)
| 48 | 0 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def __SCREAMING_SNAKE_CASE ( A_ ):
return (data["data"], data["target"])
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
lowerCAmelCase__ : Tuple = XGBClassifier()
classifier.fit(UpperCamelCase_ , UpperCamelCase_ )
return classifier
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : int = load_iris()
lowerCAmelCase__ ,lowerCAmelCase__ : List[Any] = data_handling(UpperCamelCase_ )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : str = train_test_split(
UpperCamelCase_ , UpperCamelCase_ , test_size=0.25 )
lowerCAmelCase__ : Dict = iris['''target_names''']
# Create an XGBoost Classifier from the training data
lowerCAmelCase__ : Optional[Any] = xgboost(UpperCamelCase_ , UpperCamelCase_ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , display_labels=UpperCamelCase_ , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 450 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'Salesforce/blip-image-captioning-base'
snake_case__ :List[Any] = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
snake_case__ :List[Any] = 'image_captioner'
snake_case__ :Optional[int] = AutoModelForVisionaSeq
snake_case__ :Optional[int] = ['image']
snake_case__ :Any = ['text']
def __init__( self : str , *__magic_name__ : List[str] , **__magic_name__ : Tuple ):
"""simple docstring"""
requires_backends(self , ["vision"] )
super().__init__(*__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : "Image" ):
"""simple docstring"""
return self.pre_processor(images=__magic_name__ , return_tensors="pt" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Optional[int] ):
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0].strip()
| 48 | 0 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def snake_case ( snake_case__ :Dict) -> List[str]:
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items())
def snake_case ( snake_case__ :Any , snake_case__ :Union[str, Any]) -> Dict:
_A = {}
for key, value in state_dict.items():
if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
continue
_A = key.replace("""heads.cmd.mim_head.cls.predictions""" , """mmm_image_head""")
_A = key.replace("""heads.cmd.mlm_head.cls.predictions""" , """mmm_text_head""")
_A = key.replace("""heads.cmd.itm_head.cls""" , """itm_head""")
_A = key.replace("""heads.cmd.itm_head.pooler""" , """itm_head.pooler""")
_A = key.replace("""heads.cmd.clip_head.logit_scale""" , """flava.logit_scale""")
_A = key.replace("""heads.fairseq_mlm.cls.predictions""" , """mlm_head""")
_A = key.replace("""heads.imagenet.mim_head.cls.predictions""" , """mim_head""")
_A = key.replace("""mm_text_projection""" , """flava.text_to_mm_projection""")
_A = key.replace("""mm_image_projection""" , """flava.image_to_mm_projection""")
_A = key.replace("""image_encoder.module""" , """flava.image_model""")
_A = key.replace("""text_encoder.module""" , """flava.text_model""")
_A = key.replace("""mm_encoder.module.encoder.cls_token""" , """flava.multimodal_model.cls_token""")
_A = key.replace("""mm_encoder.module""" , """flava.multimodal_model""")
_A = key.replace("""text_projection""" , """flava.text_projection""")
_A = key.replace("""image_projection""" , """flava.image_projection""")
_A = value.float()
for key, value in codebook_state_dict.items():
_A = value
return upgrade
@torch.no_grad()
def snake_case ( snake_case__ :str , snake_case__ :Tuple , snake_case__ :Optional[Any] , snake_case__ :int=None) -> Dict:
if config_path is not None:
_A = FlavaConfig.from_pretrained(UpperCamelCase_)
else:
_A = FlavaConfig()
_A = FlavaForPreTraining(UpperCamelCase_).eval()
_A = convert_dalle_checkpoint(UpperCamelCase_ , UpperCamelCase_ , save_checkpoint=UpperCamelCase_)
if os.path.exists(UpperCamelCase_):
_A = torch.load(UpperCamelCase_ , map_location="""cpu""")
else:
_A = torch.hub.load_state_dict_from_url(UpperCamelCase_ , map_location="""cpu""")
_A = upgrade_state_dict(UpperCamelCase_ , UpperCamelCase_)
hf_model.load_state_dict(UpperCamelCase_)
_A = hf_model.state_dict()
_A = count_parameters(UpperCamelCase_)
_A = count_parameters(UpperCamelCase_) + count_parameters(UpperCamelCase_)
assert torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3)
hf_model.save_pretrained(UpperCamelCase_)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint')
parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 401 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = "▁"
UpperCAmelCase__ : List[str] = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase__ : Union[str, Any] = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
UpperCAmelCase__ : Optional[Any] = {
"facebook/mbart-large-50-one-to-many-mmt": 10_24,
}
# fmt: off
UpperCAmelCase__ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Optional[int] = VOCAB_FILES_NAMES
snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Tuple = ['input_ids', 'attention_mask']
snake_case__ :List[int] = []
snake_case__ :List[int] = []
def __init__( self : int , __magic_name__ : int , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]="</s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : List[Any]="<mask>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : List[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__magic_name__ , tgt_lang=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__magic_name__ ) )
lowerCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase__ = 1
lowerCAmelCase__ = len(self.sp_model )
lowerCAmelCase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__magic_name__ )
}
lowerCAmelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
lowerCAmelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCAmelCase__ = src_lang if src_lang is not None else "en_XX"
lowerCAmelCase__ = self.lang_code_to_id[self._src_lang]
lowerCAmelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
return state
def __setstate__( self : List[Any] , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str ):
"""simple docstring"""
return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase__ = self.sp_model.PieceToId(__magic_name__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : int ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = ""
lowerCAmelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__magic_name__ ) + token
lowerCAmelCase__ = True
lowerCAmelCase__ = []
else:
current_sub_tokens.append(__magic_name__ )
lowerCAmelCase__ = False
out_string += self.sp_model.decode(__magic_name__ )
return out_string.strip()
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , "wb" ) as fi:
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
lowerCAmelCase__ = [1] * len(self.prefix_tokens )
lowerCAmelCase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__magic_name__ )) + suffix_ones
return prefix_ones + ([0] * len(__magic_name__ )) + ([0] * len(__magic_name__ )) + suffix_ones
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ )
lowerCAmelCase__ = tgt_lang_id
return inputs
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = tgt_lang
return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[src_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[tgt_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
| 48 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
__snake_case = logging.get_logger(__name__)
class _a ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self : int , *lowercase_ : Any , **lowercase_ : Any ):
'''simple docstring'''
warnings.warn(
"""The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use YolosImageProcessor instead.""" , lowercase_ , )
super().__init__(*lowercase_ , **lowercase_ )
| 451 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
if start < end:
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ ,lowerCAmelCase__ = _in_place_partition(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += _in_place_quick_sort(UpperCamelCase_ , UpperCamelCase_ , p - 1 )
count += _in_place_quick_sort(UpperCamelCase_ , p + 1 , UpperCamelCase_ )
return count
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ = start - 1
for index in range(UpperCamelCase_ , UpperCamelCase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ = new_pivot_index + 1
lowerCAmelCase__ = a[new_pivot_index]
lowerCAmelCase__ = a[index]
lowerCAmelCase__ = temp
lowerCAmelCase__ = a[new_pivot_index + 1]
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = temp
return new_pivot_index + 1, count
UpperCAmelCase__ : Tuple = TemporaryFile()
UpperCAmelCase__ : List[str] = 1_00 # 1000 elements are to be sorted
UpperCAmelCase__ , UpperCAmelCase__ : Dict = 0, 1 # mean and standard deviation
UpperCAmelCase__ : Tuple = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase__ : Optional[Any] = np.load(outfile)
UpperCAmelCase__ : Any = len(M) - 1
UpperCAmelCase__ : Tuple = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 48 | 0 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Optional[Any] , SCREAMING_SNAKE_CASE_: Optional[int] ) -> int:
'''simple docstring'''
A__ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: List[str] , SCREAMING_SNAKE_CASE_: Dict , SCREAMING_SNAKE_CASE_: int ) -> Tuple:
'''simple docstring'''
A__ = 0
while b > 0:
if b & 1:
A__ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 514 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A ( UpperCamelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCAmelCase__ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
lowerCAmelCase__ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
lowerCAmelCase__ = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCAmelCase__ = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
lowerCAmelCase__ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
lowerCAmelCase__ = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
lowerCAmelCase__ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
lowerCAmelCase__ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
lowerCAmelCase__ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
lowerCAmelCase__ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
lowerCAmelCase__ = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
lowerCAmelCase__ = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
lowerCAmelCase__ = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ ,lowerCAmelCase__ = int(key_split[2] ), int(key_split[4] )
lowerCAmelCase__ = config.vision_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[
dim : dim * 2, :
]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
else:
lowerCAmelCase__ = rename_key(UpperCamelCase_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCAmelCase__ = val.squeeze_()
else:
lowerCAmelCase__ = val
return orig_state_dict
def A ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple="groupvit-gcc-yfcc" , UpperCamelCase_ : Dict=False ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = GroupViTConfig()
lowerCAmelCase__ = GroupViTModel(UpperCamelCase_ ).eval()
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location="cpu" )["model"]
lowerCAmelCase__ = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ ,lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase_ ) == 0)
# verify result
lowerCAmelCase__ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = processor(text=["a photo of a cat", "a photo of a dog"] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="pt" )
with torch.no_grad():
lowerCAmelCase__ = model(**UpperCamelCase_ )
if model_name == "groupvit-gcc-yfcc":
lowerCAmelCase__ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCAmelCase__ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(F"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , UpperCamelCase_ , atol=1E-3 )
processor.save_pretrained(UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
print("Successfully saved processor and model to" , UpperCamelCase_ )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(UpperCamelCase_ , organization="nielsr" )
model.push_to_hub(UpperCamelCase_ , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 48 | 0 |
'''simple docstring'''
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase__ = logging.getLogger(__name__)
UpperCAmelCase__ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a__ :
'''simple docstring'''
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.'''
)
} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''If training from scratch, pass a model type from the list: ''' + ''', '''.join(SCREAMING_SNAKE_CASE__ )} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''Override some existing default config settings when a model is trained from scratch. Example: '''
'''n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'''
)
} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
A : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
A : str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
A : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
def lowerCAmelCase ( self : str ) -> Optional[Any]:
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
'--config_overrides can\'t be used in combination with --config_name or --model_name_or_path' )
@dataclass
class a__ :
'''simple docstring'''
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
A : Optional[str] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''The input training data file (a text file).'''} )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''An optional input evaluation data file to evaluate the perplexity on (a text file).'''} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''An optional input train ref data file for whole word masking in Chinese.'''} , )
A : Optional[str] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''An optional input validation ref data file for whole word masking in Chinese.'''} , )
A : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''Overwrite the cached training and evaluation sets'''} )
A : Optional[int] = field(
default=5 , metadata={
'''help''': '''The percentage of the train set used as validation set in case there\'s no validation split'''
} , )
A : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated. Default to the max input length of the model.'''
)
} , )
A : Optional[int] = field(
default=SCREAMING_SNAKE_CASE__ , metadata={'''help''': '''The number of processes to use for the preprocessing.'''} , )
A : float = field(
default=0.1_5 , metadata={'''help''': '''Ratio of tokens to mask for masked language modeling loss'''} )
A : bool = field(
default=SCREAMING_SNAKE_CASE__ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
def lowerCAmelCase ( self : List[Any] ) -> Union[str, Any]:
if self.train_file is not None:
__A= self.train_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
__A= self.validation_file.split('.' )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : Dict,_SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
with open(UpperCamelCase_,'r',encoding='utf-8' ) as f:
__A= [json.loads(UpperCamelCase_ ) for line in f.read().splitlines() if (len(UpperCamelCase_ ) > 0 and not line.isspace())]
assert len(UpperCamelCase_ ) == len(UpperCamelCase_ )
__A= {c: dataset[c] for c in dataset.column_names}
__A= refs
return Dataset.from_dict(UpperCamelCase_ )
def UpperCAmelCase__( ):
"""simple docstring"""
__A= HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__A, __A, __A= parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__A, __A, __A= parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
__A= None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__A= get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
'Use --overwrite_output_dir to overcome.' )
elif last_checkpoint is not None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',datefmt='%m/%d/%Y %H:%M:%S',handlers=[logging.StreamHandler(sys.stdout )],)
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('Training/evaluation parameters %s',UpperCamelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__A= load_dataset(data_args.dataset_name,data_args.dataset_config_name )
if "validation" not in datasets.keys():
__A= load_dataset(
data_args.dataset_name,data_args.dataset_config_name,split=f"""train[:{data_args.validation_split_percentage}%]""",)
__A= load_dataset(
data_args.dataset_name,data_args.dataset_config_name,split=f"""train[{data_args.validation_split_percentage}%:]""",)
else:
__A= {}
if data_args.train_file is not None:
__A= data_args.train_file
if data_args.validation_file is not None:
__A= data_args.validation_file
__A= data_args.train_file.split('.' )[-1]
if extension == "txt":
__A= 'text'
__A= load_dataset(UpperCamelCase_,data_files=UpperCamelCase_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__A= {
'cache_dir': model_args.cache_dir,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.config_name:
__A= AutoConfig.from_pretrained(model_args.config_name,**UpperCamelCase_ )
elif model_args.model_name_or_path:
__A= AutoConfig.from_pretrained(model_args.model_name_or_path,**UpperCamelCase_ )
else:
__A= CONFIG_MAPPING[model_args.model_type]()
logger.warning('You are instantiating a new config instance from scratch.' )
if model_args.config_overrides is not None:
logger.info(f"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(f"""New config: {config}""" )
__A= {
'cache_dir': model_args.cache_dir,
'use_fast': model_args.use_fast_tokenizer,
'revision': model_args.model_revision,
'use_auth_token': True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
__A= AutoTokenizer.from_pretrained(model_args.tokenizer_name,**UpperCamelCase_ )
elif model_args.model_name_or_path:
__A= AutoTokenizer.from_pretrained(model_args.model_name_or_path,**UpperCamelCase_ )
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.' )
if model_args.model_name_or_path:
__A= AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path,from_tf=bool('.ckpt' in model_args.model_name_or_path ),config=UpperCamelCase_,cache_dir=model_args.cache_dir,revision=model_args.model_revision,use_auth_token=True if model_args.use_auth_token else None,)
else:
logger.info('Training new model from scratch' )
__A= AutoModelForMaskedLM.from_config(UpperCamelCase_ )
model.resize_token_embeddings(len(UpperCamelCase_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
__A= datasets['train'].column_names
else:
__A= datasets['validation'].column_names
__A= 'text' if 'text' in column_names else column_names[0]
__A= 'max_length' if data_args.pad_to_max_length else False
def tokenize_function(_SCREAMING_SNAKE_CASE : int ):
# Remove empty lines
__A= [line for line in examples['text'] if len(UpperCamelCase_ ) > 0 and not line.isspace()]
return tokenizer(examples['text'],padding=UpperCamelCase_,truncation=UpperCamelCase_,max_length=data_args.max_seq_length )
__A= datasets.map(
UpperCamelCase_,batched=UpperCamelCase_,num_proc=data_args.preprocessing_num_workers,remove_columns=[text_column_name],load_from_cache_file=not data_args.overwrite_cache,)
# Add the chinese references if provided
if data_args.train_ref_file is not None:
__A= add_chinese_references(tokenized_datasets['train'],data_args.train_ref_file )
if data_args.validation_ref_file is not None:
__A= add_chinese_references(
tokenized_datasets['validation'],data_args.validation_ref_file )
# If we have ref files, need to avoid it removed by trainer
__A= data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
__A= False
# Data collator
# This one will take care of randomly masking the tokens.
__A= DataCollatorForWholeWordMask(tokenizer=UpperCamelCase_,mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
__A= Trainer(
model=UpperCamelCase_,args=UpperCamelCase_,train_dataset=tokenized_datasets['train'] if training_args.do_train else None,eval_dataset=tokenized_datasets['validation'] if training_args.do_eval else None,tokenizer=UpperCamelCase_,data_collator=UpperCamelCase_,)
# Training
if training_args.do_train:
if last_checkpoint is not None:
__A= last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
__A= model_args.model_name_or_path
else:
__A= None
__A= trainer.train(resume_from_checkpoint=UpperCamelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
__A= os.path.join(training_args.output_dir,'train_results.txt' )
if trainer.is_world_process_zero():
with open(UpperCamelCase_,'w' ) as writer:
logger.info('***** Train results *****' )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir,'trainer_state.json' ) )
# Evaluation
__A= {}
if training_args.do_eval:
logger.info('*** Evaluate ***' )
__A= trainer.evaluate()
__A= math.exp(eval_output['eval_loss'] )
__A= perplexity
__A= os.path.join(training_args.output_dir,'eval_results_mlm_wwm.txt' )
if trainer.is_world_process_zero():
with open(UpperCamelCase_,'w' ) as writer:
logger.info('***** Eval results *****' )
for key, value in sorted(results.items() ):
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
return results
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 186 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
UpperCAmelCase__ : Optional[Any] = 1_00
UpperCAmelCase__ : Any = set(range(3, NUM_PRIMES, 2))
primes.add(2)
UpperCAmelCase__ : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def A ( UpperCamelCase_ : int ) -> set[int]:
'''simple docstring'''
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
lowerCAmelCase__ = set()
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def A ( UpperCamelCase_ : int = 50_00 ) -> int | None:
'''simple docstring'''
for number_to_partition in range(1 , UpperCamelCase_ ):
if len(partition(UpperCamelCase_ ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"{solution() = }")
| 48 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase :Optional[int] = {
"configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Dict = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :str = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Tuple = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase :Dict = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
lowerCamelCase :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 667 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {"vocab_file": "vocab.json"}
UpperCAmelCase__ : Optional[Any] = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
UpperCAmelCase__ : Union[str, Any] = {"mgp-str": 27}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = VOCAB_FILES_NAMES
snake_case__ :Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int="[GO]" , __magic_name__ : Optional[Any]="[GO]" , __magic_name__ : List[str]="[s]" , __magic_name__ : str="[GO]" , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(
unk_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , pad_token=__magic_name__ , **__magic_name__ , )
with open(__magic_name__ , encoding="utf-8" ) as vocab_handle:
lowerCAmelCase__ = json.load(__magic_name__ )
lowerCAmelCase__ = {v: k for k, v in self.vocab.items()}
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return len(self.vocab )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = []
for s in text:
char_tokens.extend(__magic_name__ )
return char_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str ):
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Tuple ):
"""simple docstring"""
return self.decoder.get(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error("Vocabulary path ({}) should be a directory".format(__magic_name__ ) )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + "\n" )
return (vocab_file,)
| 48 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=7 , __UpperCAmelCase=3 , __UpperCAmelCase=18 , __UpperCAmelCase=30 , __UpperCAmelCase=4_00 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=None , ) -> Optional[int]:
A : List[Any] = size if size is not None else {'''shortest_edge''': 20}
A : Union[str, Any] = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
A : Tuple = parent
A : Union[str, Any] = batch_size
A : List[str] = num_channels
A : Tuple = image_size
A : List[str] = min_resolution
A : int = max_resolution
A : List[str] = do_resize
A : Optional[Any] = size
A : Tuple = do_center_crop
A : str = crop_size
def snake_case ( self ) -> int:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class __lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = MobileNetVaImageProcessor if is_vision_available() else None
def snake_case ( self ) -> Dict:
A : Any = MobileNetVaImageProcessingTester(self )
@property
def snake_case ( self ) -> Tuple:
return self.image_processor_tester.prepare_image_processor_dict()
def snake_case ( self ) -> str:
A : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''size''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__UpperCAmelCase , '''crop_size''' ) )
def snake_case ( self ) -> List[Any]:
A : str = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )
A : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def snake_case ( self ) -> Dict:
pass
def snake_case ( self ) -> List[str]:
A : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
A : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
A : Dict = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case ( self ) -> Union[str, Any]:
A : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
A : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
A : int = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def snake_case ( self ) -> Tuple:
A : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
A : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
A : int = image_processing(__UpperCAmelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 542 |
'''simple docstring'''
from math import sqrt
def A ( UpperCamelCase_ : int ) -> int:
'''simple docstring'''
lowerCAmelCase__ = 0
for i in range(1 , int(sqrt(UpperCamelCase_ ) + 1 ) ):
if n % i == 0 and i != sqrt(UpperCamelCase_ ):
total += i + n // i
elif i == sqrt(UpperCamelCase_ ):
total += i
return total - n
def A ( UpperCamelCase_ : int = 1_00_00 ) -> int:
'''simple docstring'''
lowerCAmelCase__ = sum(
i
for i in range(1 , UpperCamelCase_ )
if sum_of_divisors(sum_of_divisors(UpperCamelCase_ ) ) == i and sum_of_divisors(UpperCamelCase_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 48 | 0 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def _SCREAMING_SNAKE_CASE ( __snake_case : int , __snake_case : float = 0.0 , __snake_case : float = 1.0 ):
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 107 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( UpperCamelCase_ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.mean(1 )
# Centralize the data of class i
lowerCAmelCase__ = data - column_reshape(UpperCamelCase_ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(UpperCamelCase_ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T )
return covariance_sum / features.shape[1]
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = features.mean(1 )
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.shape[1]
lowerCAmelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
return covariance_sum / features.shape[1]
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
lowerCAmelCase__ = features.mean(1 )
# Center the dataset
lowerCAmelCase__ = features - np.reshape(UpperCamelCase_ , (data_mean.size, 1) )
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) / features.shape[1]
lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.eigh(UpperCamelCase_ )
# Take all the columns in the reverse order (-1), and then takes only the first
lowerCAmelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
lowerCAmelCase__ = np.dot(filtered_eigenvectors.T , UpperCamelCase_ )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
lowerCAmelCase__ ,lowerCAmelCase__ = eigh(
covariance_between_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , covariance_within_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , )
lowerCAmelCase__ = eigenvectors[:, ::-1][:, :dimensions]
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.svd(UpperCamelCase_ )
lowerCAmelCase__ = svd_matrix[:, 0:dimensions]
lowerCAmelCase__ = np.dot(filtered_svd_matrix.T , UpperCamelCase_ )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowerCAmelCase__ = np.array([0, 0, 0, 1, 1] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = linear_discriminant_analysis(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if isinstance(UpperCamelCase_ , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = principal_component_analysis(UpperCamelCase_ , UpperCamelCase_ )
if not np.allclose(UpperCamelCase_ , UpperCamelCase_ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 0 |
import argparse
import json
import subprocess
def _a ( UpperCAmelCase , UpperCAmelCase ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = []
lowerCamelCase__ : List[Any] = (
f"curl -H \"Accept: application/vnd.github+json\" -H \"Authorization: Bearer {token}\""
''' https://api.github.com/repos/huggingface/transformers/actions/runners'''
)
lowerCamelCase__ : Optional[Any] = subprocess.run(UpperCamelCase_ , shell=UpperCamelCase_ , stdout=subprocess.PIPE )
lowerCamelCase__ : Optional[Any] = output.stdout.decode('''utf-8''' )
lowerCamelCase__ : Optional[int] = json.loads(UpperCamelCase_ )
lowerCamelCase__ : Optional[int] = status['''runners''']
for runner in runners:
if runner["name"] in target_runners:
if runner["status"] == "offline":
offline_runners.append(UpperCamelCase_ )
# save the result so we can report them on Slack
with open('''offline_runners.txt''' , '''w''' ) as fp:
fp.write(json.dumps(UpperCamelCase_ ) )
if len(UpperCamelCase_ ) > 0:
lowerCamelCase__ : str = '''\n'''.join([x['''name'''] for x in offline_runners] )
raise ValueError(f"The following runners are offline:\n{failed}" )
if __name__ == "__main__":
def _a ( UpperCAmelCase ) -> int:
"""simple docstring"""
return values.split(''',''' )
_A : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
_A : Any = parser.parse_args()
get_runner_status(args.target_runners, args.token)
| 315 |
'''simple docstring'''
def A ( UpperCamelCase_ : str , UpperCamelCase_ : int ) -> list:
'''simple docstring'''
lowerCAmelCase__ = word.split()
def justify(UpperCamelCase_ : list , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> str:
lowerCAmelCase__ = max_width - width
lowerCAmelCase__ = len(UpperCamelCase_ )
if len(UpperCamelCase_ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowerCAmelCase__ = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCAmelCase__ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCAmelCase__ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(UpperCamelCase_ ):
num_spaces_between_words_list[i] += 1
lowerCAmelCase__ = []
for i in range(UpperCamelCase_ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
for word in words:
if width + len(UpperCamelCase_ ) + len(UpperCamelCase_ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(UpperCamelCase_ )
width += len(UpperCamelCase_ )
else:
# justify the line and add it to result
answer.append(justify(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) )
# reset new line and new width
lowerCAmelCase__ ,lowerCAmelCase__ = [word], len(UpperCamelCase_ )
lowerCAmelCase__ = max_width - width - len(UpperCamelCase_ )
answer.append(" ".join(UpperCamelCase_ ) + (remaining_spaces + 1) * " " )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 48 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _a ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
snake_case =StableDiffusionPanoramaPipeline
snake_case =TEXT_TO_IMAGE_PARAMS
snake_case =TEXT_TO_IMAGE_BATCH_PARAMS
snake_case =TEXT_TO_IMAGE_IMAGE_PARAMS
snake_case =TEXT_TO_IMAGE_IMAGE_PARAMS
def SCREAMING_SNAKE_CASE ( self ):
torch.manual_seed(0 )
_UpperCAmelCase =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
_UpperCAmelCase =DDIMScheduler()
torch.manual_seed(0 )
_UpperCAmelCase =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_UpperCAmelCase =CLIPTextModel(_snake_case )
_UpperCAmelCase =CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_UpperCAmelCase ={
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case=0 ):
_UpperCAmelCase =torch.manual_seed(_snake_case )
_UpperCAmelCase ={
"prompt": "a photo of the dolomites",
"generator": generator,
# Setting height and width to None to prevent OOMs on CPU.
"height": None,
"width": None,
"num_inference_steps": 1,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase =self.get_dummy_components()
_UpperCAmelCase =StableDiffusionPanoramaPipeline(**_snake_case )
_UpperCAmelCase =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase =self.get_dummy_inputs(_snake_case )
_UpperCAmelCase =sd_pipe(**_snake_case ).images
_UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase =np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def SCREAMING_SNAKE_CASE ( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5E-3 )
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase =self.get_dummy_components()
_UpperCAmelCase =StableDiffusionPanoramaPipeline(**_snake_case )
_UpperCAmelCase =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase =self.get_dummy_inputs(_snake_case )
_UpperCAmelCase ="french fries"
_UpperCAmelCase =sd_pipe(**_snake_case , negative_prompt=_snake_case )
_UpperCAmelCase =output.images
_UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase =np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase =self.get_dummy_components()
_UpperCAmelCase =StableDiffusionPanoramaPipeline(**_snake_case )
_UpperCAmelCase =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase =self.get_dummy_inputs(_snake_case )
_UpperCAmelCase =sd_pipe(**_snake_case , view_batch_size=2 )
_UpperCAmelCase =output.images
_UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase =np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase =self.get_dummy_components()
_UpperCAmelCase =EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" )
_UpperCAmelCase =StableDiffusionPanoramaPipeline(**_snake_case )
_UpperCAmelCase =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase =self.get_dummy_inputs(_snake_case )
_UpperCAmelCase =sd_pipe(**_snake_case ).images
_UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase =np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase =self.get_dummy_components()
_UpperCAmelCase =PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , skip_prk_steps=_snake_case )
_UpperCAmelCase =StableDiffusionPanoramaPipeline(**_snake_case )
_UpperCAmelCase =sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
_UpperCAmelCase =self.get_dummy_inputs(_snake_case )
_UpperCAmelCase =sd_pipe(**_snake_case ).images
_UpperCAmelCase =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase =np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE ( self , _snake_case=0 ):
_UpperCAmelCase =torch.manual_seed(_snake_case )
_UpperCAmelCase ={
"prompt": "a photo of the dolomites",
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase ="stabilityai/stable-diffusion-2-base"
_UpperCAmelCase =DDIMScheduler.from_pretrained(_snake_case , subfolder="scheduler" )
_UpperCAmelCase =StableDiffusionPanoramaPipeline.from_pretrained(_snake_case , scheduler=_snake_case , safety_checker=_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_UpperCAmelCase =self.get_inputs()
_UpperCAmelCase =pipe(**_snake_case ).images
_UpperCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_UpperCAmelCase =np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-2
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =StableDiffusionPanoramaPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-base" , safety_checker=_snake_case )
_UpperCAmelCase =LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_UpperCAmelCase =self.get_inputs()
_UpperCAmelCase =pipe(**_snake_case ).images
_UpperCAmelCase =image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
_UpperCAmelCase =np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def SCREAMING_SNAKE_CASE ( self ):
_UpperCAmelCase =0
def callback_fn(_snake_case , _snake_case , _snake_case ) -> None:
_UpperCAmelCase =True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_UpperCAmelCase =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_UpperCAmelCase =latents[0, -3:, -3:, -1]
_UpperCAmelCase =np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
_UpperCAmelCase =latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
_UpperCAmelCase =latents[0, -3:, -3:, -1]
_UpperCAmelCase =np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
_UpperCAmelCase =False
_UpperCAmelCase ="stabilityai/stable-diffusion-2-base"
_UpperCAmelCase =DDIMScheduler.from_pretrained(_snake_case , subfolder="scheduler" )
_UpperCAmelCase =StableDiffusionPanoramaPipeline.from_pretrained(_snake_case , scheduler=_snake_case , safety_checker=_snake_case )
_UpperCAmelCase =pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
_UpperCAmelCase =self.get_inputs()
pipe(**_snake_case , callback=_snake_case , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def SCREAMING_SNAKE_CASE ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_UpperCAmelCase ="stabilityai/stable-diffusion-2-base"
_UpperCAmelCase =DDIMScheduler.from_pretrained(_snake_case , subfolder="scheduler" )
_UpperCAmelCase =StableDiffusionPanoramaPipeline.from_pretrained(_snake_case , scheduler=_snake_case , safety_checker=_snake_case )
_UpperCAmelCase =pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_UpperCAmelCase =self.get_inputs()
_UpperCAmelCase =pipe(**_snake_case )
_UpperCAmelCase =torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 408 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase__ : str = sys.version_info >= (3, 10)
def A ( UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None ) -> Optional[int]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=UpperCamelCase_ )
@dataclass
class A :
snake_case__ :int
snake_case__ :float
snake_case__ :str
snake_case__ :bool
@dataclass
class A :
snake_case__ :int = 42
snake_case__ :str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :Optional[bool] = None
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'titi'
snake_case__ :Optional[int] = 'toto'
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'titi'
snake_case__ :str = 'toto'
snake_case__ :int = 42
@dataclass
class A :
snake_case__ :BasicEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.foo )
@dataclass
class A :
snake_case__ :MixedTypeEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = MixedTypeEnum(self.foo )
@dataclass
class A :
snake_case__ :Optional[int] = None
snake_case__ :Optional[float] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :Optional[str] = None
snake_case__ :Optional[List[str]] = list_field(default=[] )
snake_case__ :Optional[List[int]] = list_field(default=[] )
@dataclass
class A :
snake_case__ :List[int] = list_field(default=[] )
snake_case__ :List[int] = list_field(default=[1, 2, 3] )
snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A :
snake_case__ :List[int] = field()
snake_case__ :str = field()
snake_case__ :BasicEnum = field()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.required_enum )
@dataclass
class A :
snake_case__ :int
snake_case__ :"BasicEnum" = field()
snake_case__ :"Optional[bool]" = None
snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} )
snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :bool | None = None
@dataclass
class A :
snake_case__ :int | None = None
snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :str | None = None
snake_case__ :list[str] | None = list_field(default=[] )
snake_case__ :list[int] | None = list_field(default=[] )
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) )
del xx["type"], yy["type"]
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ )
self.assertFalse(example.flag )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
lowerCAmelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
@dataclass
class A :
snake_case__ :Literal["titi", "toto", 42] = "toto"
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(
__magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCAmelCase__ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" )
expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ )
lowerCAmelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) )
lowerCAmelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowerCAmelCase__ = parser.parse_dict(__magic_name__ )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_json" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_yaml" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
| 48 | 0 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
snake_case_ : Union[str, Any] = logging.getLogger(__name__)
class snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , __magic_name__ : Dict=-1 ) -> Optional[Any]:
lowerCamelCase_ : str = label_idx
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Union[Split, str] ) -> List[Any]:
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase_ : Any = mode.value
lowerCamelCase_ : Tuple = os.path.join(__magic_name__ , F"{mode}.txt" )
lowerCamelCase_ : int = 1
lowerCamelCase_ : List[Any] = []
with open(__magic_name__ , encoding="utf-8" ) as f:
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : str = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__magic_name__ , labels=__magic_name__ ) )
guid_index += 1
lowerCamelCase_ : Optional[int] = []
lowerCamelCase_ : Optional[Any] = []
else:
lowerCamelCase_ : Optional[int] = line.split(" " )
words.append(splits[0] )
if len(__magic_name__ ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__magic_name__ , labels=__magic_name__ ) )
return examples
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : TextIO , __magic_name__ : TextIO , __magic_name__ : List ) -> Any:
lowerCamelCase_ : Optional[int] = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(__magic_name__ )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
lowerCamelCase_ : Tuple = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(__magic_name__ )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : str ) -> Dict:
if path:
with open(__magic_name__ , "r" ) as f:
lowerCamelCase_ : int = f.read().splitlines()
if "O" not in labels:
lowerCamelCase_ : Any = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self : Optional[int] ) -> List[Any]:
super().__init__(label_idx=-2 )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ) -> List[Any]:
if path:
with open(__magic_name__ , "r" ) as f:
lowerCamelCase_ : Optional[Any] = f.read().splitlines()
if "O" not in labels:
lowerCamelCase_ : int = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : Union[Split, str] ) -> Any:
if isinstance(__magic_name__ , __magic_name__ ):
lowerCamelCase_ : Union[str, Any] = mode.value
lowerCamelCase_ : Union[str, Any] = os.path.join(__magic_name__ , F"{mode}.txt" )
lowerCamelCase_ : str = 1
lowerCamelCase_ : Any = []
with open(__magic_name__ , encoding="utf-8" ) as f:
for sentence in parse_incr(__magic_name__ ):
lowerCamelCase_ : Dict = []
lowerCamelCase_ : Any = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(__magic_name__ ) == len(__magic_name__ )
if words:
examples.append(InputExample(guid=F"{mode}-{guid_index}" , words=__magic_name__ , labels=__magic_name__ ) )
guid_index += 1
return examples
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : TextIO , __magic_name__ : TextIO , __magic_name__ : List ) -> Union[str, Any]:
lowerCamelCase_ : int = 0
for sentence in parse_incr(__magic_name__ ):
lowerCamelCase_ : int = preds_list[example_id]
lowerCamelCase_ : Dict = ""
for token in sentence:
out += F"{token['form']} ({token['upos']}|{s_p.pop(0 )}) "
out += "\n"
writer.write(__magic_name__ )
example_id += 1
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str ) -> Optional[Any]:
if path:
with open(__magic_name__ , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 488 |
'''simple docstring'''
import sys
from collections import defaultdict
class A :
def __init__( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = []
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[Any] ):
"""simple docstring"""
return self.node_position[vertex]
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = pos
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] ):
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
lowerCAmelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
lowerCAmelCase__ = 2 * start + 1
else:
lowerCAmelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
lowerCAmelCase__ ,lowerCAmelCase__ = heap[smallest_child], positions[smallest_child]
lowerCAmelCase__ ,lowerCAmelCase__ = (
heap[start],
positions[start],
)
lowerCAmelCase__ ,lowerCAmelCase__ = temp, tempa
lowerCAmelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __magic_name__ )
self.top_to_bottom(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = position[index]
while index != 0:
lowerCAmelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
lowerCAmelCase__ = heap[parent]
lowerCAmelCase__ = position[parent]
self.set_position(position[parent] , __magic_name__ )
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , __magic_name__ )
break
lowerCAmelCase__ = parent
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , 0 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
lowerCAmelCase__ = len(__magic_name__ ) // 2 - 1
for i in range(__magic_name__ , -1 , -1 ):
self.top_to_bottom(__magic_name__ , __magic_name__ , len(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = positions[0]
lowerCAmelCase__ = sys.maxsize
self.top_to_bottom(__magic_name__ , 0 , len(__magic_name__ ) , __magic_name__ )
return temp
def A ( UpperCamelCase_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = Heap()
lowerCAmelCase__ = [0] * len(UpperCamelCase_ )
lowerCAmelCase__ = [-1] * len(UpperCamelCase_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
lowerCAmelCase__ = [] # Heap of Distance of vertices from their neighboring vertex
lowerCAmelCase__ = []
for vertex in range(len(UpperCamelCase_ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase_ )
heap.node_position.append(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = 1
lowerCAmelCase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
lowerCAmelCase__ = 0
lowerCAmelCase__ = distance
heap.heapify(UpperCamelCase_ , UpperCamelCase_ )
for _ in range(1 , len(UpperCamelCase_ ) ):
lowerCAmelCase__ = heap.delete_minimum(UpperCamelCase_ , UpperCamelCase_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
lowerCAmelCase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase_ )]
):
lowerCAmelCase__ = distance
heap.bottom_to_top(
UpperCamelCase_ , heap.get_position(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase__ : Optional[int] = int(input("Enter number of edges: ").strip())
UpperCAmelCase__ : str = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase__ : int = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 48 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
lowercase__ = ['image_processor', 'tokenizer']
lowercase__ = 'BlipImageProcessor'
lowercase__ = 'AutoTokenizer'
def __init__( self : Any ,lowercase_ : str ,lowercase_ : Union[str, Any] ):
lowerCAmelCase__ : List[str] = False
super().__init__(lowercase_ ,lowercase_ )
lowerCAmelCase__ : str = self.image_processor
def __call__( self : Any ,lowercase_ : ImageInput = None ,lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,lowercase_ : bool = True ,lowercase_ : Union[bool, str, PaddingStrategy] = False ,lowercase_ : Union[bool, str, TruncationStrategy] = None ,lowercase_ : Optional[int] = None ,lowercase_ : int = 0 ,lowercase_ : Optional[int] = None ,lowercase_ : Optional[bool] = None ,lowercase_ : bool = False ,lowercase_ : bool = False ,lowercase_ : bool = False ,lowercase_ : bool = False ,lowercase_ : bool = False ,lowercase_ : bool = True ,lowercase_ : Optional[Union[str, TensorType]] = None ,**lowercase_ : str ,):
if images is None and text is None:
raise ValueError('''You have to specify either images or text.''' )
# Get only text
if images is None:
lowerCAmelCase__ : Tuple = self.tokenizer
lowerCAmelCase__ : str = self.tokenizer(
text=lowercase_ ,add_special_tokens=lowercase_ ,padding=lowercase_ ,truncation=lowercase_ ,max_length=lowercase_ ,stride=lowercase_ ,pad_to_multiple_of=lowercase_ ,return_attention_mask=lowercase_ ,return_overflowing_tokens=lowercase_ ,return_special_tokens_mask=lowercase_ ,return_offsets_mapping=lowercase_ ,return_token_type_ids=lowercase_ ,return_length=lowercase_ ,verbose=lowercase_ ,return_tensors=lowercase_ ,**lowercase_ ,)
return text_encoding
# add pixel_values
lowerCAmelCase__ : List[Any] = self.image_processor(lowercase_ ,return_tensors=lowercase_ )
if text is not None:
lowerCAmelCase__ : Any = self.tokenizer(
text=lowercase_ ,add_special_tokens=lowercase_ ,padding=lowercase_ ,truncation=lowercase_ ,max_length=lowercase_ ,stride=lowercase_ ,pad_to_multiple_of=lowercase_ ,return_attention_mask=lowercase_ ,return_overflowing_tokens=lowercase_ ,return_special_tokens_mask=lowercase_ ,return_offsets_mapping=lowercase_ ,return_token_type_ids=lowercase_ ,return_length=lowercase_ ,verbose=lowercase_ ,return_tensors=lowercase_ ,**lowercase_ ,)
else:
lowerCAmelCase__ : Union[str, Any] = None
if text_encoding is not None:
encoding_image_processor.update(lowercase_ )
return encoding_image_processor
def __lowerCAmelCase ( self : List[str] ,*lowercase_ : Optional[int] ,**lowercase_ : Optional[int] ):
return self.tokenizer.batch_decode(*lowercase_ ,**lowercase_ )
def __lowerCAmelCase ( self : Dict ,*lowercase_ : Any ,**lowercase_ : List[Any] ):
return self.tokenizer.decode(*lowercase_ ,**lowercase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def __lowerCAmelCase ( self : Optional[Any] ):
lowerCAmelCase__ : Union[str, Any] = self.tokenizer.model_input_names
lowerCAmelCase__ : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 450 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
UpperCAmelCase__ : Tuple = 5
UpperCAmelCase__ : List[Any] = 10
@require_sentencepiece
@require_tokenizers
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Tuple = SpeechaTextTokenizer
snake_case__ :Dict = False
snake_case__ :Optional[int] = True
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
lowerCAmelCase__ = sp.SentencePieceProcessor()
spm_model.Load(__magic_name__ )
lowerCAmelCase__ = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )]
lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCAmelCase__ = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = "<pad>"
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__magic_name__ ) , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [289, 50, 14, 174, 386] , )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class A ( unittest.TestCase ):
snake_case__ :Union[str, Any] = 'valhalla/s2t_mustc_multilinguial_medium'
snake_case__ :Tuple = 'C\'est trop cool'
snake_case__ :List[str] = 'Esto es genial'
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertIn(__magic_name__ , self.tokenizer.all_special_ids )
lowerCAmelCase__ = [ES_CODE, 4, 1601, 47, 7647, 2]
lowerCAmelCase__ = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertNotIn(self.tokenizer.eos_token , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
lowerCAmelCase__ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __magic_name__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowerCAmelCase__ = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 48 | 0 |
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def snake_case ( snake_case__ :str , snake_case__ :str) -> str | Literal[False]:
_A = list(UpperCamelCase_)
_A = list(UpperCamelCase_)
_A = 0
for i in range(len(UpperCamelCase_)):
if lista[i] != lista[i]:
count += 1
_A = """_"""
if count > 1:
return False
else:
return "".join(UpperCamelCase_)
def snake_case ( snake_case__ :list[str]) -> list[str]:
_A = []
while True:
_A = ["""$"""] * len(UpperCamelCase_)
_A = []
for i in range(len(UpperCamelCase_)):
for j in range(i + 1 , len(UpperCamelCase_)):
_A = compare_string(binary[i] , binary[j])
if k is False:
_A = """*"""
_A = """*"""
temp.append("""X""")
for i in range(len(UpperCamelCase_)):
if checka[i] == "$":
pi.append(binary[i])
if len(UpperCamelCase_) == 0:
return pi
_A = list(set(UpperCamelCase_))
def snake_case ( snake_case__ :int , snake_case__ :Sequence[float]) -> list[str]:
_A = []
for minterm in minterms:
_A = """"""
for _ in range(UpperCamelCase_):
_A = str(minterm % 2) + string
minterm //= 2
temp.append(UpperCamelCase_)
return temp
def snake_case ( snake_case__ :str , snake_case__ :str , snake_case__ :int) -> bool:
_A = list(UpperCamelCase_)
_A = list(UpperCamelCase_)
_A = 0
for i in range(len(UpperCamelCase_)):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def snake_case ( snake_case__ :list[list[int]] , snake_case__ :list[str]) -> list[str]:
_A = []
_A = [0] * len(UpperCamelCase_)
for i in range(len(chart[0])):
_A = 0
_A = -1
for j in range(len(UpperCamelCase_)):
if chart[j][i] == 1:
count += 1
_A = j
if count == 1:
_A = 1
for i in range(len(UpperCamelCase_)):
if select[i] == 1:
for j in range(len(chart[0])):
if chart[i][j] == 1:
for k in range(len(UpperCamelCase_)):
_A = 0
temp.append(prime_implicants[i])
while True:
_A = 0
_A = -1
_A = 0
for i in range(len(UpperCamelCase_)):
_A = chart[i].count(1)
if count_n > max_n:
_A = count_n
_A = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem])
for i in range(len(chart[0])):
if chart[rem][i] == 1:
for j in range(len(UpperCamelCase_)):
_A = 0
def snake_case ( snake_case__ :list[str] , snake_case__ :list[str]) -> list[list[int]]:
_A = [[0 for x in range(len(UpperCamelCase_))] for x in range(len(UpperCamelCase_))]
for i in range(len(UpperCamelCase_)):
_A = prime_implicants[i].count("""_""")
for j in range(len(UpperCamelCase_)):
if is_for_table(prime_implicants[i] , binary[j] , UpperCamelCase_):
_A = 1
return chart
def snake_case ( ) -> None:
_A = int(input("""Enter the no. of variables\n"""))
_A = [
float(UpperCamelCase_)
for x in input(
"""Enter the decimal representation of Minterms 'Spaces Separated'\n""").split()
]
_A = decimal_to_binary(UpperCamelCase_ , UpperCamelCase_)
_A = check(UpperCamelCase_)
print("""Prime Implicants are:""")
print(UpperCamelCase_)
_A = prime_implicant_chart(UpperCamelCase_ , UpperCamelCase_)
_A = selection(UpperCamelCase_ , UpperCamelCase_)
print("""Essential Prime Implicants are:""")
print(UpperCamelCase_)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 401 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ : int = "RegNetConfig"
# Base docstring
UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[int] = [1, 10_88, 7, 7]
# Image classification docstring
UpperCAmelCase__ : Tuple = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[Any] = "tabby, tabby cat"
UpperCAmelCase__ : int = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
def __init__( self : str , __magic_name__ : int , __magic_name__ : int = 3 , __magic_name__ : int = 1 , __magic_name__ : int = 1 , __magic_name__ : Optional[str] = "relu" , **__magic_name__ : int , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowerCAmelCase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=__magic_name__ , strides=__magic_name__ , padding="VALID" , groups=__magic_name__ , use_bias=__magic_name__ , name="convolution" , )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
lowerCAmelCase__ = ACTaFN[activation] if activation is not None else tf.identity
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.convolution(self.padding(__magic_name__ ) )
lowerCAmelCase__ = self.normalization(__magic_name__ )
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , __magic_name__ : RegNetConfig , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config.num_channels
lowerCAmelCase__ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = shape_list(__magic_name__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 2, 3, 1) )
lowerCAmelCase__ = self.embedder(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Any , __magic_name__ : int , __magic_name__ : int = 2 , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=1 , strides=__magic_name__ , use_bias=__magic_name__ , name="convolution" )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : bool = False ):
"""simple docstring"""
return self.normalization(self.convolution(__magic_name__ ) , training=__magic_name__ )
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
lowerCAmelCase__ = [
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.pooler(__magic_name__ )
for layer_module in self.attention:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = hidden_state * pooled
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.2" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(__magic_name__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.3" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 , __magic_name__ : int = 2 , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowerCAmelCase__ = [
# downsampling is done in the first layer with stride of 2
layer(__magic_name__ , __magic_name__ , __magic_name__ , stride=__magic_name__ , name="layers.0" ),
*[layer(__magic_name__ , __magic_name__ , __magic_name__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] ):
"""simple docstring"""
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowerCAmelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__magic_name__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ , name=f"""stages.{i+1}""" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : tf.Tensor , __magic_name__ : bool = False , __magic_name__ : bool = True ):
"""simple docstring"""
lowerCAmelCase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
lowerCAmelCase__ = stage_module(__magic_name__ )
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__magic_name__ , hidden_states=__magic_name__ )
@keras_serializable
class A ( tf.keras.layers.Layer ):
snake_case__ :List[Any] = RegNetConfig
def __init__( self : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config
lowerCAmelCase__ = TFRegNetEmbeddings(__magic_name__ , name="embedder" )
lowerCAmelCase__ = TFRegNetEncoder(__magic_name__ , name="encoder" )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
@unpack_inputs
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.embedder(__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = self.encoder(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = encoder_outputs[0]
lowerCAmelCase__ = self.pooler(__magic_name__ )
# Change to NCHW output format have uniformity in the modules
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCAmelCase__ = tuple([tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :str = RegNetConfig
snake_case__ :Optional[Any] = 'regnet'
snake_case__ :Tuple = 'pixel_values'
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
UpperCAmelCase__ : List[str] = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ : Tuple = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Any , __magic_name__ : RegNetConfig , *__magic_name__ : Optional[int] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : int=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
pixel_values=__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , *__magic_name__ : Tuple , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = config.num_labels
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
# classification head
lowerCAmelCase__ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : tf.Tensor = None , __magic_name__ : tf.Tensor = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ = self.classifier[0](__magic_name__ )
lowerCAmelCase__ = self.classifier[1](__magic_name__ )
lowerCAmelCase__ = None if labels is None else self.hf_compute_loss(labels=__magic_name__ , logits=__magic_name__ )
if not return_dict:
lowerCAmelCase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states )
| 48 | 0 |
'''simple docstring'''
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->List[Any]:
lowercase_ = 0
lowercase_ = len(UpperCamelCase_ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
lowercase_ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase_ ):
return None
lowercase_ = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
lowercase_ = left
lowercase_ = point
elif point > right:
lowercase_ = right
lowercase_ = point
else:
if item < current_item:
lowercase_ = point - 1
else:
lowercase_ = point + 1
return None
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->Any:
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
lowercase_ = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(UpperCamelCase_ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
elif point > right:
return interpolation_search_by_recursion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , point - 1 )
else:
return interpolation_search_by_recursion(
UpperCamelCase_ , UpperCamelCase_ , point + 1 , UpperCamelCase_ )
def A_ ( SCREAMING_SNAKE_CASE_ ) ->Union[str, Any]:
if collection != sorted(UpperCamelCase_ ):
raise ValueError("""Collection must be ascending sorted""" )
return True
if __name__ == "__main__":
import sys
__snake_case = 0
if debug == 1:
__snake_case = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit("""Sequence must be ascending sorted to apply interpolation search""")
__snake_case = 67
__snake_case = interpolation_search(collection, target)
if result is not None:
print(f'''{target} found at positions: {result}''')
else:
print("""Not found""")
| 451 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def A ( UpperCamelCase_ : Tuple ) -> int:
'''simple docstring'''
for param in module.parameters():
lowerCAmelCase__ = False
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowerCAmelCase__ = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = plt.imshow(UpperCamelCase_ )
fig.axes.get_xaxis().set_visible(UpperCamelCase_ )
fig.axes.get_yaxis().set_visible(UpperCamelCase_ )
plt.show()
def A ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = datetime.now()
lowerCAmelCase__ = current_time.strftime("%H:%M:%S" )
return timestamp
| 48 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
"facebook/data2vec-vision-base-ft": (
"https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
),
}
class a__ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = 'data2vec-vision'
def __init__( self , lowercase=768 , lowercase=12 , lowercase=12 , lowercase=3072 , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=1e-12 , lowercase=224 , lowercase=16 , lowercase=3 , lowercase=False , lowercase=False , lowercase=False , lowercase=False , lowercase=0.1 , lowercase=0.1 , lowercase=True , lowercase=[3, 5, 7, 11] , lowercase=[1, 2, 3, 6] , lowercase=True , lowercase=0.4 , lowercase=256 , lowercase=1 , lowercase=False , lowercase=255 , **lowercase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(**lowercase )
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = layer_norm_eps
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = use_mask_token
A__ = use_absolute_position_embeddings
A__ = use_relative_position_bias
A__ = use_shared_relative_position_bias
A__ = layer_scale_init_value
A__ = drop_path_rate
A__ = use_mean_pooling
# decode head attributes (semantic segmentation)
A__ = out_indices
A__ = pool_scales
# auxiliary head attributes (semantic segmentation)
A__ = use_auxiliary_head
A__ = auxiliary_loss_weight
A__ = auxiliary_channels
A__ = auxiliary_num_convs
A__ = auxiliary_concat_input
A__ = semantic_loss_ignore_index
class a__ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCamelCase = version.parse('1.11' )
@property
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
return 1e-4
| 514 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase__ : List[Any] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 | 0 |
'''simple docstring'''
from PIL import Image
def UpperCAmelCase__( _SCREAMING_SNAKE_CASE : Image,_SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
__A= (259 * (level + 255)) / (255 * (259 - level))
def contrast(_SCREAMING_SNAKE_CASE : int ) -> int:
return int(128 + factor * (c - 128) )
return img.point(UpperCamelCase_ )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
UpperCAmelCase__ = change_contrast(img, 1_7_0)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
| 186 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : int ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = BigBirdConfig.from_json_file(UpperCamelCase_ )
print(F"""Building PyTorch model from configuration: {config}""" )
if is_trivia_qa:
lowerCAmelCase__ = BigBirdForQuestionAnswering(UpperCamelCase_ )
else:
lowerCAmelCase__ = BigBirdForPreTraining(UpperCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(UpperCamelCase_ , UpperCamelCase_ , is_trivia_qa=UpperCamelCase_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
UpperCAmelCase__ : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 48 | 0 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
lowerCamelCase :List[Any] = logging.get_logger(__name__)
@add_end_docstrings(
SCREAMING_SNAKE_CASE__ , r'\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n ' , )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
def _a (self , lowercase ):
if self.framework == "tf":
A_ : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
A_ : Any = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowercase )
else:
raise ValueError("""Unsupported framework""" )
return masked_index
def _a (self , lowercase ):
A_ : List[str] = self.get_masked_index(lowercase )
A_ : List[Any] = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , F'No mask_token ({self.tokenizer.mask_token}) found on the input' , )
def _a (self , lowercase ):
if isinstance(lowercase , lowercase ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input["""input_ids"""][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(lowercase )
def _a (self , lowercase , lowercase=None , **lowercase ):
if return_tensors is None:
A_ : Any = self.framework
A_ : Any = self.tokenizer(lowercase , return_tensors=lowercase )
self.ensure_exactly_one_mask_token(lowercase )
return model_inputs
def _a (self , lowercase ):
A_ : List[str] = self.model(**lowercase )
A_ : Union[str, Any] = model_inputs["""input_ids"""]
return model_outputs
def _a (self , lowercase , lowercase=5 , lowercase=None ):
if target_ids is not None and target_ids.shape[0] < top_k:
A_ : Any = target_ids.shape[0]
A_ : List[Any] = model_outputs["""input_ids"""][0]
A_ : Dict = model_outputs["""logits"""]
if self.framework == "tf":
A_ : Dict = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
A_ : Union[str, Any] = outputs.numpy()
A_ : List[Any] = outputs[0, masked_index, :]
A_ : List[Any] = stable_softmax(lowercase , axis=-1 )
if target_ids is not None:
A_ : str = tf.gather_nd(tf.squeeze(lowercase , 0 ) , target_ids.reshape(-1 , 1 ) )
A_ : Dict = tf.expand_dims(lowercase , 0 )
A_ : Dict = tf.math.top_k(lowercase , k=lowercase )
A_, A_ : Union[str, Any] = topk.values.numpy(), topk.indices.numpy()
else:
A_ : Tuple = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowercase ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
A_ : str = outputs[0, masked_index, :]
A_ : List[str] = logits.softmax(dim=-1 )
if target_ids is not None:
A_ : Any = probs[..., target_ids]
A_, A_ : Optional[int] = probs.topk(lowercase )
A_ : str = []
A_ : Any = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
A_ : int = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
A_ : Union[str, Any] = input_ids.numpy().copy()
if target_ids is not None:
A_ : Optional[int] = target_ids[p].tolist()
A_ : Any = p
# Filter padding out:
A_ : List[str] = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
A_ : List[Any] = self.tokenizer.decode(lowercase , skip_special_tokens=lowercase )
A_ : Optional[int] = {"""score""": v, """token""": p, """token_str""": self.tokenizer.decode([p] ), """sequence""": sequence}
row.append(lowercase )
result.append(lowercase )
if single_mask:
return result[0]
return result
def _a (self , lowercase , lowercase=None ):
if isinstance(lowercase , lowercase ):
A_ : Optional[Any] = [targets]
try:
A_ : Optional[int] = self.tokenizer.get_vocab()
except Exception:
A_ : List[Any] = {}
A_ : str = []
for target in targets:
A_ : Any = vocab.get(lowercase , lowercase )
if id_ is None:
A_ : str = self.tokenizer(
lowercase , add_special_tokens=lowercase , return_attention_mask=lowercase , return_token_type_ids=lowercase , max_length=1 , truncation=lowercase , )["""input_ids"""]
if len(lowercase ) == 0:
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
"""We cannot replace it with anything meaningful, ignoring it""" )
continue
A_ : Optional[Any] = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F'The specified target token `{target}` does not exist in the model vocabulary. '
F'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' )
target_ids.append(id_ )
A_ : Union[str, Any] = list(set(lowercase ) )
if len(lowercase ) == 0:
raise ValueError("""At least one target must be provided when passed.""" )
A_ : int = np.array(lowercase )
return target_ids
def _a (self , lowercase=None , lowercase=None ):
A_ : Dict = {}
if targets is not None:
A_ : int = self.get_target_ids(lowercase , lowercase )
A_ : List[Any] = target_ids
if top_k is not None:
A_ : Any = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
"""fill-mask""" , self.model.base_model_prefix , """The tokenizer does not define a `mask_token`.""" )
return {}, {}, postprocess_params
def __call__(self , lowercase , *lowercase , **lowercase ):
A_ : Union[str, Any] = super().__call__(lowercase , **lowercase )
if isinstance(lowercase , lowercase ) and len(lowercase ) == 1:
return outputs[0]
return outputs
| 667 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class A :
def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=13 , __magic_name__ : List[str]=7 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : int=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Dict=1 , __magic_name__ : Tuple=32 , __magic_name__ : Any=4 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=512 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = block_sizes
lowerCAmelCase__ = num_decoder_layers
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_head
lowerCAmelCase__ = d_head
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = 2
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase__ = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase__ = self.num_hidden_layers + 2
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForPreTraining(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForMaskedLM(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForSequenceClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFFunnelForMultipleChoice(config=__magic_name__ )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForTokenClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,
) = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ :Any = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ :str = False
snake_case__ :Any = False
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case__ :int = False
snake_case__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self , base=__magic_name__ )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
| 48 | 0 |
from math import sqrt
def snake_case__ ( lowerCamelCase_ ):
A : str = 0
for i in range(1 , int(sqrt(UpperCamelCase_ ) + 1 ) ):
if n % i == 0 and i != sqrt(UpperCamelCase_ ):
total += i + n // i
elif i == sqrt(UpperCamelCase_ ):
total += i
return total - n
def snake_case__ ( lowerCamelCase_ = 10000 ):
A : Tuple = sum(
i
for i in range(1 , UpperCamelCase_ )
if sum_of_divisors(sum_of_divisors(UpperCamelCase_ ) ) == i and sum_of_divisors(UpperCamelCase_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 542 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'umt5'
snake_case__ :Any = ['past_key_values']
def __init__( self : List[Any] , __magic_name__ : Tuple=250112 , __magic_name__ : str=512 , __magic_name__ : int=64 , __magic_name__ : str=1024 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=6 , __magic_name__ : Dict=32 , __magic_name__ : Optional[Any]=128 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=1E-6 , __magic_name__ : Optional[int]=1.0 , __magic_name__ : Dict="gated-gelu" , __magic_name__ : List[str]=True , __magic_name__ : Tuple=True , __magic_name__ : Optional[int]="T5Tokenizer" , __magic_name__ : str=True , __magic_name__ : int=0 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : str=0 , **__magic_name__ : Any , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=__magic_name__ , tokenizer_class=__magic_name__ , tie_word_embeddings=__magic_name__ , pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = d_model
lowerCAmelCase__ = d_kv
lowerCAmelCase__ = d_ff
lowerCAmelCase__ = num_layers
lowerCAmelCase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = relative_attention_num_buckets
lowerCAmelCase__ = relative_attention_max_distance
lowerCAmelCase__ = dropout_rate
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_factor
lowerCAmelCase__ = feed_forward_proj
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = self.feed_forward_proj.split("-" )
lowerCAmelCase__ = act_info[-1]
lowerCAmelCase__ = act_info[0] == "gated"
if len(__magic_name__ ) > 1 and act_info[0] != "gated" or len(__magic_name__ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
lowerCAmelCase__ = "gelu_new"
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return self.d_model
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return self.num_heads
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.num_layers
class A ( SCREAMING_SNAKE_CASE__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
lowerCAmelCase__ = "past_encoder_sequence + sequence"
lowerCAmelCase__ = {0: "batch"}
lowerCAmelCase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 13
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 5E-4
| 48 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[Any]:
_A = 'laion/clap-htsat-unfused'
_A = tempfile.mkdtemp()
def __UpperCAmelCase ( self : List[str], **UpperCamelCase__ : Any ) -> List[str]:
return RobertaTokenizer.from_pretrained(self.checkpoint, **UpperCamelCase__ )
def __UpperCAmelCase ( self : Any, **UpperCamelCase__ : Any ) -> Optional[Any]:
return ClapFeatureExtractor.from_pretrained(self.checkpoint, **UpperCamelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
_A = self.get_tokenizer()
_A = self.get_feature_extractor()
_A = ClapProcessor(tokenizer=UpperCamelCase__, feature_extractor=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
_A = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer, UpperCamelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor, UpperCamelCase__ )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict:
_A = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_A = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)' )
_A = self.get_feature_extractor(do_normalize=UpperCamelCase__, padding_value=1.0 )
_A = ClapProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=UpperCamelCase__, padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer, UpperCamelCase__ )
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor, UpperCamelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> List[str]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = ClapProcessor(tokenizer=UpperCamelCase__, feature_extractor=UpperCamelCase__ )
_A = floats_list((3, 10_00) )
_A = feature_extractor(UpperCamelCase__, return_tensors='np' )
_A = processor(audios=UpperCamelCase__, return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2 )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = ClapProcessor(tokenizer=UpperCamelCase__, feature_extractor=UpperCamelCase__ )
_A = 'This is a test string'
_A = processor(text=UpperCamelCase__ )
_A = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key] )
def __UpperCAmelCase ( self : Dict ) -> Union[str, Any]:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = ClapProcessor(tokenizer=UpperCamelCase__, feature_extractor=UpperCamelCase__ )
_A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_A = processor.batch_decode(UpperCamelCase__ )
_A = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__, UpperCamelCase__ )
def __UpperCAmelCase ( self : Any ) -> Dict:
_A = self.get_feature_extractor()
_A = self.get_tokenizer()
_A = ClapProcessor(tokenizer=UpperCamelCase__, feature_extractor=UpperCamelCase__ )
self.assertListEqual(
processor.model_input_names[2:], feature_extractor.model_input_names, msg='`processor` and `feature_extractor` model input names do not match', )
| 107 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class A :
def __init__( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = {}
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = {}
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : str , __magic_name__ : float ):
"""simple docstring"""
if nodea not in self.connections:
self.add_node(__magic_name__ )
if nodea not in self.connections:
self.add_node(__magic_name__ )
lowerCAmelCase__ = probability
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return list(self.connections )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def A ( UpperCamelCase_ : str , UpperCamelCase_ : list[tuple[str, str, float]] , UpperCamelCase_ : int ) -> dict[str, int]:
'''simple docstring'''
lowerCAmelCase__ = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = Counter(graph.get_nodes() )
lowerCAmelCase__ = start
for _ in range(UpperCamelCase_ ):
lowerCAmelCase__ = graph.transition(UpperCamelCase_ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_A : str = logging.get_logger(__name__)
_A : int = {
"weiweishi/roc-bert-base-zh": "https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json",
}
class __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
_UpperCAmelCase : str = 'roc_bert'
def __init__( self : int , A : Optional[int]=3_0_5_2_2 , A : Optional[Any]=7_6_8 , A : Dict=1_2 , A : int=1_2 , A : Tuple=3_0_7_2 , A : Optional[int]="gelu" , A : List[Any]=0.1 , A : Dict=0.1 , A : Optional[Any]=5_1_2 , A : List[Any]=2 , A : Optional[Any]=0.02 , A : Tuple=1e-12 , A : List[Any]=True , A : List[Any]=0 , A : List[str]="absolute" , A : Dict=None , A : List[Any]=True , A : Optional[Any]=True , A : Dict=7_6_8 , A : int=9_1_0 , A : Optional[Any]=5_1_2 , A : int=2_4_8_5_8 , A : Dict=True , **A : Union[str, Any] , ) ->Tuple:
lowerCamelCase__ : List[str] = vocab_size
lowerCamelCase__ : Optional[Any] = max_position_embeddings
lowerCamelCase__ : Union[str, Any] = hidden_size
lowerCamelCase__ : Union[str, Any] = num_hidden_layers
lowerCamelCase__ : Optional[Any] = num_attention_heads
lowerCamelCase__ : Union[str, Any] = intermediate_size
lowerCamelCase__ : str = hidden_act
lowerCamelCase__ : Optional[int] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Dict = initializer_range
lowerCamelCase__ : int = type_vocab_size
lowerCamelCase__ : int = layer_norm_eps
lowerCamelCase__ : Dict = use_cache
lowerCamelCase__ : Optional[Any] = enable_pronunciation
lowerCamelCase__ : Tuple = enable_shape
lowerCamelCase__ : List[Any] = pronunciation_embed_dim
lowerCamelCase__ : Union[str, Any] = pronunciation_vocab_size
lowerCamelCase__ : Optional[int] = shape_embed_dim
lowerCamelCase__ : Optional[int] = shape_vocab_size
lowerCamelCase__ : Union[str, Any] = concat_input
lowerCamelCase__ : Union[str, Any] = position_embedding_type
lowerCamelCase__ : str = classifier_dropout
super().__init__(pad_token_id=A , **A )
| 315 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase__ : Optional[Any] = pytest.mark.integration
UpperCAmelCase__ : str = {"comet"}
UpperCAmelCase__ : Optional[Any] = importlib.util.find_spec("fairseq") is not None
UpperCAmelCase__ : Optional[int] = {"code_eval"}
UpperCAmelCase__ : List[Any] = os.name == "nt"
UpperCAmelCase__ : Optional[int] = {"bertscore", "frugalscore", "perplexity"}
UpperCAmelCase__ : int = importlib.util.find_spec("transformers") is not None
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[Any] , UpperCamelCase_ : List[str] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : int ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : Any ) -> int:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : Optional[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@local
class A ( parameterized.TestCase ):
snake_case__ :Union[str, Any] = {}
snake_case__ :Optional[Any] = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__magic_name__ )
# check parameters
lowerCAmelCase__ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__magic_name__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__ ):
yield
else:
yield
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
def load_local_metric(__magic_name__ : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : Any ):
return load_metric(os.path.join("metrics" , __magic_name__ ) , *__magic_name__ , **__magic_name__ )
with patch("datasets.load_metric" ) as mock_load_metric:
lowerCAmelCase__ = load_local_metric
yield
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : Optional[int] ):
"""simple docstring"""
def wrapper(__magic_name__ : Dict ):
lowerCAmelCase__ = contextmanager(__magic_name__ )
lowerCAmelCase__ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def A ( UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class A ( SCREAMING_SNAKE_CASE__ ):
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
lowerCAmelCase__ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def A ( UpperCamelCase_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
import torch
def bert_cos_score_idf(UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
lowerCAmelCase__ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def A ( UpperCamelCase_ : Optional[int] ) -> Any:
'''simple docstring'''
def load_from_checkpoint(UpperCamelCase_ : Tuple ):
class A :
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ):
"""simple docstring"""
assert len(__magic_name__ ) == 2
lowerCAmelCase__ = [0.19, 0.92]
return scores, sum(__magic_name__ ) / len(__magic_name__ )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
lowerCAmelCase__ = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
lowerCAmelCase__ = load_from_checkpoint
yield
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = load_metric(os.path.join("metrics" , "seqeval" ) )
lowerCAmelCase__ = "ERROR"
lowerCAmelCase__ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(UpperCamelCase_ , match=re.escape(UpperCamelCase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase_ )
| 48 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case__ : Optional[Any] = {"configuration_timm_backbone": ["TimmBackboneConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case__ : List[str] = ["TimmBackbone"]
if TYPE_CHECKING:
from .configuration_timm_backbone import TimmBackboneConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timm_backbone import TimmBackbone
else:
import sys
snake_case__ : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 408 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
UpperCAmelCase__ : int = {
"Acehnese Arabic": "ace_Arab",
"Acehnese Latin": "ace_Latn",
"Mesopotamian Arabic": "acm_Arab",
"Ta'izzi-Adeni Arabic": "acq_Arab",
"Tunisian Arabic": "aeb_Arab",
"Afrikaans": "afr_Latn",
"South Levantine Arabic": "ajp_Arab",
"Akan": "aka_Latn",
"Amharic": "amh_Ethi",
"North Levantine Arabic": "apc_Arab",
"Modern Standard Arabic": "arb_Arab",
"Modern Standard Arabic Romanized": "arb_Latn",
"Najdi Arabic": "ars_Arab",
"Moroccan Arabic": "ary_Arab",
"Egyptian Arabic": "arz_Arab",
"Assamese": "asm_Beng",
"Asturian": "ast_Latn",
"Awadhi": "awa_Deva",
"Central Aymara": "ayr_Latn",
"South Azerbaijani": "azb_Arab",
"North Azerbaijani": "azj_Latn",
"Bashkir": "bak_Cyrl",
"Bambara": "bam_Latn",
"Balinese": "ban_Latn",
"Belarusian": "bel_Cyrl",
"Bemba": "bem_Latn",
"Bengali": "ben_Beng",
"Bhojpuri": "bho_Deva",
"Banjar Arabic": "bjn_Arab",
"Banjar Latin": "bjn_Latn",
"Standard Tibetan": "bod_Tibt",
"Bosnian": "bos_Latn",
"Buginese": "bug_Latn",
"Bulgarian": "bul_Cyrl",
"Catalan": "cat_Latn",
"Cebuano": "ceb_Latn",
"Czech": "ces_Latn",
"Chokwe": "cjk_Latn",
"Central Kurdish": "ckb_Arab",
"Crimean Tatar": "crh_Latn",
"Welsh": "cym_Latn",
"Danish": "dan_Latn",
"German": "deu_Latn",
"Southwestern Dinka": "dik_Latn",
"Dyula": "dyu_Latn",
"Dzongkha": "dzo_Tibt",
"Greek": "ell_Grek",
"English": "eng_Latn",
"Esperanto": "epo_Latn",
"Estonian": "est_Latn",
"Basque": "eus_Latn",
"Ewe": "ewe_Latn",
"Faroese": "fao_Latn",
"Fijian": "fij_Latn",
"Finnish": "fin_Latn",
"Fon": "fon_Latn",
"French": "fra_Latn",
"Friulian": "fur_Latn",
"Nigerian Fulfulde": "fuv_Latn",
"Scottish Gaelic": "gla_Latn",
"Irish": "gle_Latn",
"Galician": "glg_Latn",
"Guarani": "grn_Latn",
"Gujarati": "guj_Gujr",
"Haitian Creole": "hat_Latn",
"Hausa": "hau_Latn",
"Hebrew": "heb_Hebr",
"Hindi": "hin_Deva",
"Chhattisgarhi": "hne_Deva",
"Croatian": "hrv_Latn",
"Hungarian": "hun_Latn",
"Armenian": "hye_Armn",
"Igbo": "ibo_Latn",
"Ilocano": "ilo_Latn",
"Indonesian": "ind_Latn",
"Icelandic": "isl_Latn",
"Italian": "ita_Latn",
"Javanese": "jav_Latn",
"Japanese": "jpn_Jpan",
"Kabyle": "kab_Latn",
"Jingpho": "kac_Latn",
"Kamba": "kam_Latn",
"Kannada": "kan_Knda",
"Kashmiri Arabic": "kas_Arab",
"Kashmiri Devanagari": "kas_Deva",
"Georgian": "kat_Geor",
"Central Kanuri Arabic": "knc_Arab",
"Central Kanuri Latin": "knc_Latn",
"Kazakh": "kaz_Cyrl",
"Kabiyè": "kbp_Latn",
"Kabuverdianu": "kea_Latn",
"Khmer": "khm_Khmr",
"Kikuyu": "kik_Latn",
"Kinyarwanda": "kin_Latn",
"Kyrgyz": "kir_Cyrl",
"Kimbundu": "kmb_Latn",
"Northern Kurdish": "kmr_Latn",
"Kikongo": "kon_Latn",
"Korean": "kor_Hang",
"Lao": "lao_Laoo",
"Ligurian": "lij_Latn",
"Limburgish": "lim_Latn",
"Lingala": "lin_Latn",
"Lithuanian": "lit_Latn",
"Lombard": "lmo_Latn",
"Latgalian": "ltg_Latn",
"Luxembourgish": "ltz_Latn",
"Luba-Kasai": "lua_Latn",
"Ganda": "lug_Latn",
"Luo": "luo_Latn",
"Mizo": "lus_Latn",
"Standard Latvian": "lvs_Latn",
"Magahi": "mag_Deva",
"Maithili": "mai_Deva",
"Malayalam": "mal_Mlym",
"Marathi": "mar_Deva",
"Minangkabau Arabic ": "min_Arab",
"Minangkabau Latin": "min_Latn",
"Macedonian": "mkd_Cyrl",
"Plateau Malagasy": "plt_Latn",
"Maltese": "mlt_Latn",
"Meitei Bengali": "mni_Beng",
"Halh Mongolian": "khk_Cyrl",
"Mossi": "mos_Latn",
"Maori": "mri_Latn",
"Burmese": "mya_Mymr",
"Dutch": "nld_Latn",
"Norwegian Nynorsk": "nno_Latn",
"Norwegian Bokmål": "nob_Latn",
"Nepali": "npi_Deva",
"Northern Sotho": "nso_Latn",
"Nuer": "nus_Latn",
"Nyanja": "nya_Latn",
"Occitan": "oci_Latn",
"West Central Oromo": "gaz_Latn",
"Odia": "ory_Orya",
"Pangasinan": "pag_Latn",
"Eastern Panjabi": "pan_Guru",
"Papiamento": "pap_Latn",
"Western Persian": "pes_Arab",
"Polish": "pol_Latn",
"Portuguese": "por_Latn",
"Dari": "prs_Arab",
"Southern Pashto": "pbt_Arab",
"Ayacucho Quechua": "quy_Latn",
"Romanian": "ron_Latn",
"Rundi": "run_Latn",
"Russian": "rus_Cyrl",
"Sango": "sag_Latn",
"Sanskrit": "san_Deva",
"Santali": "sat_Olck",
"Sicilian": "scn_Latn",
"Shan": "shn_Mymr",
"Sinhala": "sin_Sinh",
"Slovak": "slk_Latn",
"Slovenian": "slv_Latn",
"Samoan": "smo_Latn",
"Shona": "sna_Latn",
"Sindhi": "snd_Arab",
"Somali": "som_Latn",
"Southern Sotho": "sot_Latn",
"Spanish": "spa_Latn",
"Tosk Albanian": "als_Latn",
"Sardinian": "srd_Latn",
"Serbian": "srp_Cyrl",
"Swati": "ssw_Latn",
"Sundanese": "sun_Latn",
"Swedish": "swe_Latn",
"Swahili": "swh_Latn",
"Silesian": "szl_Latn",
"Tamil": "tam_Taml",
"Tatar": "tat_Cyrl",
"Telugu": "tel_Telu",
"Tajik": "tgk_Cyrl",
"Tagalog": "tgl_Latn",
"Thai": "tha_Thai",
"Tigrinya": "tir_Ethi",
"Tamasheq Latin": "taq_Latn",
"Tamasheq Tifinagh": "taq_Tfng",
"Tok Pisin": "tpi_Latn",
"Tswana": "tsn_Latn",
"Tsonga": "tso_Latn",
"Turkmen": "tuk_Latn",
"Tumbuka": "tum_Latn",
"Turkish": "tur_Latn",
"Twi": "twi_Latn",
"Central Atlas Tamazight": "tzm_Tfng",
"Uyghur": "uig_Arab",
"Ukrainian": "ukr_Cyrl",
"Umbundu": "umb_Latn",
"Urdu": "urd_Arab",
"Northern Uzbek": "uzn_Latn",
"Venetian": "vec_Latn",
"Vietnamese": "vie_Latn",
"Waray": "war_Latn",
"Wolof": "wol_Latn",
"Xhosa": "xho_Latn",
"Eastern Yiddish": "ydd_Hebr",
"Yoruba": "yor_Latn",
"Yue Chinese": "yue_Hant",
"Chinese Simplified": "zho_Hans",
"Chinese Traditional": "zho_Hant",
"Standard Malay": "zsm_Latn",
"Zulu": "zul_Latn",
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'facebook/nllb-200-distilled-600M'
snake_case__ :Optional[Any] = (
'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
'which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in '
'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
)
snake_case__ :List[Any] = 'translator'
snake_case__ :List[Any] = AutoTokenizer
snake_case__ :Optional[Any] = AutoModelForSeqaSeqLM
snake_case__ :List[str] = LANGUAGE_CODES
snake_case__ :List[Any] = ['text', 'text', 'text']
snake_case__ :List[Any] = ['text']
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] ):
"""simple docstring"""
if src_lang not in self.lang_to_code:
raise ValueError(f"""{src_lang} is not a supported language.""" )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"""{tgt_lang} is not a supported language.""" )
lowerCAmelCase__ = self.lang_to_code[src_lang]
lowerCAmelCase__ = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__magic_name__ , return_tensors="pt" , src_lang=__magic_name__ , tgt_lang=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__magic_name__ )
| 48 | 0 |
def __a ( __UpperCAmelCase : int ) -> str:
"""simple docstring"""
lowerCamelCase_ : Any = int(UpperCamelCase_ )
if decimal in (0, 1): # Exit cases for the recursion
return str(UpperCamelCase_ )
lowerCamelCase_ , lowerCamelCase_ : List[Any] = divmod(UpperCamelCase_ , 2 )
return binary_recursive(UpperCamelCase_ ) + str(UpperCamelCase_ )
def __a ( __UpperCAmelCase : str ) -> str:
"""simple docstring"""
lowerCamelCase_ : Dict = str(UpperCamelCase_ ).strip()
if not number:
raise ValueError("No input value was provided" )
lowerCamelCase_ : Dict = "-" if number.startswith("-" ) else ""
lowerCamelCase_ : Union[str, Any] = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f"{negative}0b{binary_recursive(int(UpperCamelCase_ ) )}"
if __name__ == "__main__":
from doctest import testmod
testmod()
| 488 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ : int = logging.get_logger(__name__)
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'timm_backbone'
def __init__( self : Tuple , __magic_name__ : Tuple=None , __magic_name__ : Optional[Any]=3 , __magic_name__ : Dict=True , __magic_name__ : str=True , __magic_name__ : List[Any]=None , **__magic_name__ : Tuple , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = backbone
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = features_only
lowerCAmelCase__ = use_pretrained_backbone
lowerCAmelCase__ = True
lowerCAmelCase__ = out_indices if out_indices is not None else (-1,)
| 48 | 0 |
"""simple docstring"""
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[Any] ,lowercase_ : Any ):
lowerCAmelCase__ : List[Any] = data
lowerCAmelCase__ : Dict = [0x67_45_23_01, 0xEF_CD_AB_89, 0x98_BA_DC_FE, 0x10_32_54_76, 0xC3_D2_E1_F0]
@staticmethod
def __lowerCAmelCase ( lowercase_ : Optional[int] ,lowercase_ : str ):
return ((n << b) | (n >> (3_2 - b))) & 0xFF_FF_FF_FF
def __lowerCAmelCase ( self : int ):
lowerCAmelCase__ : Any = B'''\x80''' + B'''\x00''' * (6_3 - (len(self.data ) + 8) % 6_4)
lowerCAmelCase__ : Tuple = self.data + padding + struct.pack('''>Q''' ,8 * len(self.data ) )
return padded_data
def __lowerCAmelCase ( self : List[str] ):
return [
self.padded_data[i : i + 6_4] for i in range(0 ,len(self.padded_data ) ,6_4 )
]
def __lowerCAmelCase ( self : Any ,lowercase_ : Union[str, Any] ):
lowerCAmelCase__ : List[Any] = list(struct.unpack('''>16L''' ,lowercase_ ) ) + [0] * 6_4
for i in range(1_6 ,8_0 ):
lowerCAmelCase__ : Optional[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) ,1 )
return w
def __lowerCAmelCase ( self : Union[str, Any] ):
lowerCAmelCase__ : int = self.padding()
lowerCAmelCase__ : Optional[Any] = self.split_blocks()
for block in self.blocks:
lowerCAmelCase__ : str = self.expand_block(lowercase_ )
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = self.h
for i in range(0 ,8_0 ):
if 0 <= i < 2_0:
lowerCAmelCase__ : Optional[Any] = (b & c) | ((~b) & d)
lowerCAmelCase__ : List[Any] = 0x5A_82_79_99
elif 2_0 <= i < 4_0:
lowerCAmelCase__ : Any = b ^ c ^ d
lowerCAmelCase__ : List[str] = 0x6E_D9_EB_A1
elif 4_0 <= i < 6_0:
lowerCAmelCase__ : List[str] = (b & c) | (b & d) | (c & d)
lowerCAmelCase__ : Union[str, Any] = 0x8F_1B_BC_DC
elif 6_0 <= i < 8_0:
lowerCAmelCase__ : Dict = b ^ c ^ d
lowerCAmelCase__ : Tuple = 0xCA_62_C1_D6
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ : Any = (
self.rotate(lowercase_ ,5 ) + f + e + k + expanded_block[i] & 0xFF_FF_FF_FF,
a,
self.rotate(lowercase_ ,3_0 ),
c,
d,
)
lowerCAmelCase__ : Dict = (
self.h[0] + a & 0xFF_FF_FF_FF,
self.h[1] + b & 0xFF_FF_FF_FF,
self.h[2] + c & 0xFF_FF_FF_FF,
self.h[3] + d & 0xFF_FF_FF_FF,
self.h[4] + e & 0xFF_FF_FF_FF,
)
return ("{:08x}" * 5).format(*self.h )
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : Any = b'''Test String'''
assert SHAaHash(UpperCamelCase_ ).final_hash() == hashlib.shaa(UpperCamelCase_ ).hexdigest() # noqa: S324
def __SCREAMING_SNAKE_CASE ( ):
lowerCAmelCase__ : List[str] = argparse.ArgumentParser(description='''Process some strings or files''' )
parser.add_argument(
'''--string''' , dest='''input_string''' , default='''Hello World!! Welcome to Cryptography''' , help='''Hash the string''' , )
parser.add_argument('''--file''' , dest='''input_file''' , help='''Hash contents of a file''' )
lowerCAmelCase__ : int = parser.parse_args()
lowerCAmelCase__ : Any = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , '''rb''' ) as f:
lowerCAmelCase__ : Tuple = f.read()
else:
lowerCAmelCase__ : str = bytes(UpperCamelCase_ , '''utf-8''' )
print(SHAaHash(UpperCamelCase_ ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 450 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Tuple = 'Salesforce/blip-image-captioning-base'
snake_case__ :List[Any] = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
snake_case__ :List[Any] = 'image_captioner'
snake_case__ :Optional[int] = AutoModelForVisionaSeq
snake_case__ :Optional[int] = ['image']
snake_case__ :Any = ['text']
def __init__( self : str , *__magic_name__ : List[str] , **__magic_name__ : Tuple ):
"""simple docstring"""
requires_backends(self , ["vision"] )
super().__init__(*__magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : "Image" ):
"""simple docstring"""
return self.pre_processor(images=__magic_name__ , return_tensors="pt" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Tuple ):
"""simple docstring"""
return self.model.generate(**__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Optional[int] ):
"""simple docstring"""
return self.pre_processor.batch_decode(__magic_name__ , skip_special_tokens=__magic_name__ )[0].strip()
| 48 | 0 |
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class a ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :List[Any] = DiTPipeline
lowerCamelCase :List[str] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
lowerCamelCase :List[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
lowerCamelCase :Optional[int] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
lowerCamelCase :Dict = False
def UpperCAmelCase ( self ) -> int:
torch.manual_seed(0 )
_A = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=lowerCAmelCase_ , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=10_00 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=lowerCAmelCase_ , )
_A = AutoencoderKL()
_A = DDIMScheduler()
_A = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler}
return components
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Optional[int]:
if str(lowerCAmelCase_ ).startswith("""mps""" ):
_A = torch.manual_seed(lowerCAmelCase_ )
else:
_A = torch.Generator(device=lowerCAmelCase_ ).manual_seed(lowerCAmelCase_ )
_A = {
"""class_labels""": [1],
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = """cpu"""
_A = self.get_dummy_components()
_A = self.pipeline_class(**lowerCAmelCase_ )
pipe.to(lowerCAmelCase_ )
pipe.set_progress_bar_config(disable=lowerCAmelCase_ )
_A = self.get_dummy_inputs(lowerCAmelCase_ )
_A = pipe(**lowerCAmelCase_ ).images
_A = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_A = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_A = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCAmelCase_ , 1E-3 )
def UpperCAmelCase ( self ) -> Tuple:
self._test_inference_batch_single_identical(relax_max_difference=lowerCAmelCase_ , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase ( self ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self ) -> str:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self ) -> str:
_A = torch.manual_seed(0 )
_A = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" )
pipe.to("""cuda""" )
_A = ["""vase""", """umbrella""", """white shark""", """white wolf"""]
_A = pipe.get_label_ids(lowerCAmelCase_ )
_A = pipe(lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=40 , output_type="""np""" ).images
for word, image in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def UpperCAmelCase ( self ) -> Dict:
_A = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" )
_A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to("""cuda""" )
_A = ["""vase""", """umbrella"""]
_A = pipe.get_label_ids(lowerCAmelCase_ )
_A = torch.manual_seed(0 )
_A = pipe(lowerCAmelCase_ , generator=lowerCAmelCase_ , num_inference_steps=25 , output_type="""np""" ).images
for word, image in zip(lowerCAmelCase_ , lowerCAmelCase_ ):
_A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 401 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : Union[str, Any] = "▁"
UpperCAmelCase__ : List[str] = {"vocab_file": "sentencepiece.bpe.model"}
UpperCAmelCase__ : Union[str, Any] = {
"vocab_file": {
"facebook/mbart-large-50-one-to-many-mmt": (
"https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
),
}
}
UpperCAmelCase__ : Optional[Any] = {
"facebook/mbart-large-50-one-to-many-mmt": 10_24,
}
# fmt: off
UpperCAmelCase__ : Tuple = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Optional[int] = VOCAB_FILES_NAMES
snake_case__ :str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ :Any = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Tuple = ['input_ids', 'attention_mask']
snake_case__ :List[int] = []
snake_case__ :List[int] = []
def __init__( self : int , __magic_name__ : int , __magic_name__ : Dict=None , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[int]="</s>" , __magic_name__ : List[Any]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Tuple="<unk>" , __magic_name__ : List[Any]="<pad>" , __magic_name__ : List[Any]="<mask>" , __magic_name__ : Optional[Dict[str, Any]] = None , **__magic_name__ : List[Any] , ):
"""simple docstring"""
lowerCAmelCase__ = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
lowerCAmelCase__ = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCAmelCase__ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__magic_name__ , tgt_lang=__magic_name__ , eos_token=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , sp_model_kwargs=self.sp_model_kwargs , **__magic_name__ , )
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__magic_name__ ) )
lowerCAmelCase__ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase__ = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase__ = 1
lowerCAmelCase__ = len(self.sp_model )
lowerCAmelCase__ = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__magic_name__ )
}
lowerCAmelCase__ = {v: k for k, v in self.lang_code_to_id.items()}
lowerCAmelCase__ = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCAmelCase__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCAmelCase__ = src_lang if src_lang is not None else "en_XX"
lowerCAmelCase__ = self.lang_code_to_id[self._src_lang]
lowerCAmelCase__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.__dict__.copy()
lowerCAmelCase__ = None
return state
def __setstate__( self : List[Any] , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCAmelCase__ = {}
lowerCAmelCase__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = {self.convert_ids_to_tokens(__magic_name__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : str ):
"""simple docstring"""
return self.sp_model.encode(__magic_name__ , out_type=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase__ = self.sp_model.PieceToId(__magic_name__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : int ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = ""
lowerCAmelCase__ = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__magic_name__ ) + token
lowerCAmelCase__ = True
lowerCAmelCase__ = []
else:
current_sub_tokens.append(__magic_name__ )
lowerCAmelCase__ = False
out_string += self.sp_model.decode(__magic_name__ )
return out_string.strip()
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __magic_name__ )
elif not os.path.isfile(self.vocab_file ):
with open(__magic_name__ , "wb" ) as fi:
lowerCAmelCase__ = self.sp_model.serialized_model_proto()
fi.write(__magic_name__ )
return (out_vocab_file,)
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None , __magic_name__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__magic_name__ , token_ids_a=__magic_name__ , already_has_special_tokens=__magic_name__ )
lowerCAmelCase__ = [1] * len(self.prefix_tokens )
lowerCAmelCase__ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__magic_name__ )) + suffix_ones
return prefix_ones + ([0] * len(__magic_name__ )) + ([0] * len(__magic_name__ )) + suffix_ones
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = self.convert_tokens_to_ids(__magic_name__ )
lowerCAmelCase__ = tgt_lang_id
return inputs
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : Union[str, Any] , ):
"""simple docstring"""
lowerCAmelCase__ = src_lang
lowerCAmelCase__ = tgt_lang
return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[src_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.lang_code_to_id[tgt_lang]
lowerCAmelCase__ = [self.cur_lang_code_id]
lowerCAmelCase__ = [self.eos_token_id]
| 48 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 451 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : int , UpperCamelCase_ : List[Any] ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
if start < end:
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ ,lowerCAmelCase__ = _in_place_partition(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
count += _in_place_quick_sort(UpperCamelCase_ , UpperCamelCase_ , p - 1 )
count += _in_place_quick_sort(UpperCamelCase_ , p + 1 , UpperCamelCase_ )
return count
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] , UpperCamelCase_ : Any ) -> Dict:
'''simple docstring'''
lowerCAmelCase__ = 0
lowerCAmelCase__ = randint(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = a[pivot]
lowerCAmelCase__ = temp
lowerCAmelCase__ = start - 1
for index in range(UpperCamelCase_ , UpperCamelCase_ ):
count += 1
if a[index] < a[end]: # check if current val is less than pivot value
lowerCAmelCase__ = new_pivot_index + 1
lowerCAmelCase__ = a[new_pivot_index]
lowerCAmelCase__ = a[index]
lowerCAmelCase__ = temp
lowerCAmelCase__ = a[new_pivot_index + 1]
lowerCAmelCase__ = a[end]
lowerCAmelCase__ = temp
return new_pivot_index + 1, count
UpperCAmelCase__ : Tuple = TemporaryFile()
UpperCAmelCase__ : List[str] = 1_00 # 1000 elements are to be sorted
UpperCAmelCase__ , UpperCAmelCase__ : Dict = 0, 1 # mean and standard deviation
UpperCAmelCase__ : Tuple = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)
outfile.seek(0) # using the same array
UpperCAmelCase__ : Optional[Any] = np.load(outfile)
UpperCAmelCase__ : Any = len(M) - 1
UpperCAmelCase__ : Tuple = _in_place_quick_sort(M, 0, r)
print(
"No of Comparisons for 100 elements selected from a standard normal distribution"
"is :"
)
print(z)
| 48 | 0 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class a__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = IFInpaintingPipeline
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
__lowerCamelCase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__lowerCamelCase = PipelineTesterMixin.required_optional_params - {'latents'}
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
return self._get_dummy_components()
def UpperCamelCase ( self , lowercase , lowercase=0 ) -> List[str]:
'''simple docstring'''
if str(lowercase ).startswith("mps" ):
A__ = torch.manual_seed(lowercase )
else:
A__ = torch.Generator(device=lowercase ).manual_seed(lowercase )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowercase ) ).to(lowercase )
A__ = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def UpperCamelCase ( self ) -> Dict:
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def UpperCamelCase ( self ) -> List[str]:
'''simple docstring'''
self._test_save_load_local()
def UpperCamelCase ( self ) -> Optional[Any]:
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 514 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def A ( UpperCamelCase_ : List[Any] ) -> Tuple:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCAmelCase__ = name.replace("img_encoder.pos_embed" , "vision_model.embeddings.position_embeddings" )
if "img_encoder.patch_embed.proj" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.proj" , "vision_model.embeddings.patch_embeddings.projection" )
if "img_encoder.patch_embed.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.patch_embed.norm" , "vision_model.embeddings.layernorm" )
if "img_encoder.layers" in name:
lowerCAmelCase__ = name.replace("img_encoder.layers" , "vision_model.encoder.stages" )
if "blocks" in name and "res" not in name:
lowerCAmelCase__ = name.replace("blocks" , "layers" )
if "attn" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("attn" , "self_attn" )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCAmelCase__ = name.replace("proj" , "out_proj" )
if "pre_assign_attn.attn.proj" in name:
lowerCAmelCase__ = name.replace("pre_assign_attn.attn.proj" , "pre_assign_attn.attn.out_proj" )
if "norm1" in name:
lowerCAmelCase__ = name.replace("norm1" , "layer_norm1" )
if "norm2" in name and "pre_assign" not in name:
lowerCAmelCase__ = name.replace("norm2" , "layer_norm2" )
if "img_encoder.norm" in name:
lowerCAmelCase__ = name.replace("img_encoder.norm" , "vision_model.layernorm" )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.token_embedding" , "text_model.embeddings.token_embedding" )
if "text_encoder.positional_embedding" in name:
lowerCAmelCase__ = name.replace("text_encoder.positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "text_encoder.transformer.resblocks." in name:
lowerCAmelCase__ = name.replace("text_encoder.transformer.resblocks." , "text_model.encoder.layers." )
if "ln_1" in name:
lowerCAmelCase__ = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
lowerCAmelCase__ = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
lowerCAmelCase__ = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
lowerCAmelCase__ = name.replace("c_proj" , "fc2" )
if "text_encoder" in name:
lowerCAmelCase__ = name.replace("text_encoder" , "text_model" )
if "ln_final" in name:
lowerCAmelCase__ = name.replace("ln_final" , "final_layer_norm" )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_hidden." , "visual_projection." )
if "img_projector.linear_out." in name:
lowerCAmelCase__ = name.replace("img_projector.linear_out." , "visual_projection.3." )
if "text_projector.linear_hidden" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_hidden" , "text_projection" )
if "text_projector.linear_out" in name:
lowerCAmelCase__ = name.replace("text_projector.linear_out" , "text_projection.3" )
return name
def A ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCAmelCase__ = orig_state_dict.pop(UpperCamelCase_ )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ ,lowerCAmelCase__ = int(key_split[2] ), int(key_split[4] )
lowerCAmelCase__ = config.vision_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[dim : dim * 2, :]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCAmelCase__ = key.split("." )
lowerCAmelCase__ = int(key_split[3] )
lowerCAmelCase__ = config.text_config.hidden_size
if "weight" in key:
lowerCAmelCase__ = val[:dim, :]
lowerCAmelCase__ = val[
dim : dim * 2, :
]
lowerCAmelCase__ = val[-dim:, :]
else:
lowerCAmelCase__ = val[:dim]
lowerCAmelCase__ = val[dim : dim * 2]
lowerCAmelCase__ = val[-dim:]
else:
lowerCAmelCase__ = rename_key(UpperCamelCase_ )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCAmelCase__ = val.squeeze_()
else:
lowerCAmelCase__ = val
return orig_state_dict
def A ( ) -> Optional[int]:
'''simple docstring'''
lowerCAmelCase__ = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw )
return im
@torch.no_grad()
def A ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any , UpperCamelCase_ : Tuple="groupvit-gcc-yfcc" , UpperCamelCase_ : Dict=False ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = GroupViTConfig()
lowerCAmelCase__ = GroupViTModel(UpperCamelCase_ ).eval()
lowerCAmelCase__ = torch.load(UpperCamelCase_ , map_location="cpu" )["model"]
lowerCAmelCase__ = convert_state_dict(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ ,lowerCAmelCase__ = model.load_state_dict(UpperCamelCase_ , strict=UpperCamelCase_ )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCamelCase_ ) == 0)
# verify result
lowerCAmelCase__ = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32" )
lowerCAmelCase__ = prepare_img()
lowerCAmelCase__ = processor(text=["a photo of a cat", "a photo of a dog"] , images=UpperCamelCase_ , padding=UpperCamelCase_ , return_tensors="pt" )
with torch.no_grad():
lowerCAmelCase__ = model(**UpperCamelCase_ )
if model_name == "groupvit-gcc-yfcc":
lowerCAmelCase__ = torch.tensor([[13.3_523, 6.3_629]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCAmelCase__ = torch.tensor([[16.1_873, 8.6_230]] )
else:
raise ValueError(F"""Model name {model_name} not supported.""" )
assert torch.allclose(outputs.logits_per_image , UpperCamelCase_ , atol=1E-3 )
processor.save_pretrained(UpperCamelCase_ )
model.save_pretrained(UpperCamelCase_ )
print("Successfully saved processor and model to" , UpperCamelCase_ )
if push_to_hub:
print("Pushing to the hub..." )
processor.push_to_hub(UpperCamelCase_ , organization="nielsr" )
model.push_to_hub(UpperCamelCase_ , organization="nielsr" )
if __name__ == "__main__":
UpperCAmelCase__ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
UpperCAmelCase__ : Any = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 48 | 0 |
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
UpperCAmelCase__ = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
UpperCAmelCase__ = importlib.util.spec_from_file_location(
'''transformers''',
os.path.join(PATH_TO_TRANSFORMERS, '''__init__.py'''),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
UpperCAmelCase__ = spec.loader.load_module()
UpperCAmelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
UpperCAmelCase__ = re.compile('''\[(.+?)\]\((https://huggingface\.co/.+?)\)''')
UpperCAmelCase__ = {
"CLIPConfigMixin",
"DecisionTransformerConfigMixin",
"EncoderDecoderConfigMixin",
"RagConfigMixin",
"SpeechEncoderDecoderConfigMixin",
"VisionEncoderDecoderConfigMixin",
"VisionTextDualEncoderConfigMixin",
}
def UpperCAmelCase__( ):
"""simple docstring"""
__A= []
for config_class in list(CONFIG_MAPPING.values() ):
__A= False
# source code of `config_class`
__A= inspect.getsource(UpperCamelCase_ )
__A= _re_checkpoint.findall(UpperCamelCase_ )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
__A, __A= checkpoint
# verify the checkpoint name corresponds to the checkpoint link
__A= f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
__A= True
break
__A= config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(UpperCamelCase_ )
if len(UpperCamelCase_ ) > 0:
__A= '\n'.join(sorted(UpperCamelCase_ ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 186 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
UpperCAmelCase__ : Optional[Any] = 1_00
UpperCAmelCase__ : Any = set(range(3, NUM_PRIMES, 2))
primes.add(2)
UpperCAmelCase__ : int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_00 )
def A ( UpperCamelCase_ : int ) -> set[int]:
'''simple docstring'''
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
lowerCAmelCase__ = set()
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def A ( UpperCamelCase_ : int = 50_00 ) -> int | None:
'''simple docstring'''
for number_to_partition in range(1 , UpperCamelCase_ ):
if len(partition(UpperCamelCase_ ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"{solution() = }")
| 48 | 0 |
'''simple docstring'''
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : Tuple = [[] for _ in range(UpperCamelCase_ )]
A_ : Any = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1 or len(UpperCamelCase_ ) <= key:
return input_string
for position, character in enumerate(UpperCamelCase_ ):
A_ : Tuple = position % (lowest * 2) # puts it in bounds
A_ : List[str] = min(UpperCamelCase_ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(UpperCamelCase_ )
A_ : Optional[int] = ["""""".join(UpperCamelCase_ ) for row in temp_grid]
A_ : Dict = """""".join(UpperCamelCase_ )
return output_string
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : str = []
A_ : int = key - 1
if key <= 0:
raise ValueError("""Height of grid can't be 0 or negative""" )
if key == 1:
return input_string
A_ : List[Any] = [[] for _ in range(UpperCamelCase_ )] # generates template
for position in range(len(UpperCamelCase_ ) ):
A_ : Dict = position % (lowest * 2) # puts it in bounds
A_ : Tuple = min(UpperCamelCase_ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("""*""" )
A_ : Optional[int] = 0
for row in temp_grid: # fills in the characters
A_ : Union[str, Any] = input_string[counter : counter + len(UpperCamelCase_ )]
grid.append(list(UpperCamelCase_ ) )
counter += len(UpperCamelCase_ )
A_ : Union[str, Any] = """""" # reads as zigzag
for position in range(len(UpperCamelCase_ ) ):
A_ : Union[str, Any] = position % (lowest * 2) # puts it in bounds
A_ : Tuple = min(UpperCamelCase_ , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def a ( lowerCamelCase__ ):
'''simple docstring'''
A_ : Optional[int] = {}
for key_guess in range(1 , len(UpperCamelCase_ ) ): # tries every key
A_ : List[Any] = decrypt(UpperCamelCase_ , UpperCamelCase_ )
return results
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {"vocab_file": "vocab.json"}
UpperCAmelCase__ : Optional[Any] = {
"vocab_file": {
"mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
}
}
UpperCAmelCase__ : Union[str, Any] = {"mgp-str": 27}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = VOCAB_FILES_NAMES
snake_case__ :Dict = PRETRAINED_VOCAB_FILES_MAP
snake_case__ :Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Union[str, Any] , __magic_name__ : List[str] , __magic_name__ : int="[GO]" , __magic_name__ : Optional[Any]="[GO]" , __magic_name__ : List[str]="[s]" , __magic_name__ : str="[GO]" , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(
unk_token=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , pad_token=__magic_name__ , **__magic_name__ , )
with open(__magic_name__ , encoding="utf-8" ) as vocab_handle:
lowerCAmelCase__ = json.load(__magic_name__ )
lowerCAmelCase__ = {v: k for k, v in self.vocab.items()}
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return len(self.vocab )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Dict ):
"""simple docstring"""
lowerCAmelCase__ = []
for s in text:
char_tokens.extend(__magic_name__ )
return char_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str ):
"""simple docstring"""
return self.vocab.get(__magic_name__ , self.vocab.get(self.unk_token ) )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : Tuple ):
"""simple docstring"""
return self.decoder.get(__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : str , __magic_name__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(__magic_name__ ):
logger.error("Vocabulary path ({}) should be a directory".format(__magic_name__ ) )
return
lowerCAmelCase__ = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(__magic_name__ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__magic_name__ , ensure_ascii=__magic_name__ ) + "\n" )
return (vocab_file,)
| 48 | 0 |
import tempfile
import unittest
import numpy as np
from diffusers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionPipeline,
PNDMScheduler,
)
from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __lowercase ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ : str = 'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'
def snake_case ( self , __UpperCAmelCase=0 ) -> List[str]:
A : Any = np.random.RandomState(__UpperCAmelCase )
A : List[str] = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def snake_case ( self ) -> Optional[Any]:
A : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : Union[str, Any] = self.get_dummy_inputs()
A : str = pipe(**__UpperCAmelCase ).images
A : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A : Union[str, Any] = np.array([0.6_5_0_7_2, 0.5_8_4_9_2, 0.4_8_2_1_9, 0.5_5_5_2_1, 0.5_3_1_8_0, 0.5_5_9_3_9, 0.5_0_6_9_7, 0.3_9_8_0_0, 0.4_6_4_5_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ) -> str:
A : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A : str = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : Optional[int] = self.get_dummy_inputs()
A : str = pipe(**__UpperCAmelCase ).images
A : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A : List[Any] = np.array([0.6_5_8_6_3, 0.5_9_4_2_5, 0.4_9_3_2_6, 0.5_6_3_1_3, 0.5_3_8_7_5, 0.5_6_6_2_7, 0.5_1_0_6_5, 0.3_9_7_7_7, 0.4_6_3_3_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ) -> Optional[int]:
A : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : str = self.get_dummy_inputs()
A : Optional[int] = pipe(**__UpperCAmelCase ).images
A : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A : Optional[Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ) -> int:
A : Dict = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A : Optional[int] = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : List[str] = self.get_dummy_inputs()
A : Dict = pipe(**__UpperCAmelCase ).images
A : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A : Optional[Any] = np.array([0.5_3_7_5_5, 0.6_0_7_8_6, 0.4_7_4_0_2, 0.4_9_4_8_8, 0.5_1_8_6_9, 0.4_9_8_1_9, 0.4_7_9_8_5, 0.3_8_9_5_7, 0.4_4_2_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ) -> str:
A : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A : Dict = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : Union[str, Any] = self.get_dummy_inputs()
A : Optional[int] = pipe(**__UpperCAmelCase ).images
A : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A : Optional[int] = np.array([0.5_3_8_1_7, 0.6_0_8_1_2, 0.4_7_3_8_4, 0.4_9_5_3_0, 0.5_1_8_9_4, 0.4_9_8_1_4, 0.4_7_9_8_4, 0.3_8_9_5_8, 0.4_4_2_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ) -> Union[str, Any]:
A : Optional[Any] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
A : int = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : Dict = self.get_dummy_inputs()
A : int = pipe(**__UpperCAmelCase ).images
A : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_28, 1_28, 3)
A : Dict = np.array([0.5_3_8_9_5, 0.6_0_8_0_8, 0.4_7_9_3_3, 0.4_9_6_0_8, 0.5_1_8_8_6, 0.4_9_9_5_0, 0.4_8_0_5_3, 0.3_8_9_5_7, 0.4_4_2_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case ( self ) -> Optional[Any]:
A : List[str] = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : Tuple = self.get_dummy_inputs()
A : Optional[Any] = 3 * [inputs['''prompt''']]
# forward
A : Union[str, Any] = pipe(**__UpperCAmelCase )
A : Optional[int] = output.images[0, -3:, -3:, -1]
A : List[Any] = self.get_dummy_inputs()
A : int = 3 * [inputs.pop('''prompt''' )]
A : Union[str, Any] = pipe.tokenizer(
__UpperCAmelCase , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors='''np''' , )
A : str = text_inputs['''input_ids''']
A : Optional[int] = pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0]
A : Dict = prompt_embeds
# forward
A : List[Any] = pipe(**__UpperCAmelCase )
A : Optional[int] = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
def snake_case ( self ) -> Tuple:
A : int = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : Tuple = self.get_dummy_inputs()
A : List[str] = 3 * ['''this is a negative prompt''']
A : Union[str, Any] = negative_prompt
A : Optional[int] = 3 * [inputs['''prompt''']]
# forward
A : int = pipe(**__UpperCAmelCase )
A : Optional[int] = output.images[0, -3:, -3:, -1]
A : Dict = self.get_dummy_inputs()
A : Union[str, Any] = 3 * [inputs.pop('''prompt''' )]
A : Union[str, Any] = []
for p in [prompt, negative_prompt]:
A : Dict = pipe.tokenizer(
__UpperCAmelCase , padding='''max_length''' , max_length=pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors='''np''' , )
A : List[Any] = text_inputs['''input_ids''']
embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.intaa ) )[0] )
A , A : Tuple = embeds
# forward
A : Optional[Any] = pipe(**__UpperCAmelCase )
A : Dict = output.images[0, -3:, -3:, -1]
assert np.abs(image_slice_a.flatten() - image_slice_a.flatten() ).max() < 1E-4
@nightly
@require_onnxruntime
@require_torch_gpu
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@property
def snake_case ( self ) -> List[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case ( self ) -> Tuple:
A : List[str] = ort.SessionOptions()
A : Optional[Any] = False
return options
def snake_case ( self ) -> int:
A : Any = OnnxStableDiffusionPipeline.from_pretrained(
'''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : str = '''A painting of a squirrel eating a burger'''
np.random.seed(0 )
A : Union[str, Any] = sd_pipe([prompt] , guidance_scale=6.0 , num_inference_steps=10 , output_type='''np''' )
A : Union[str, Any] = output.images
A : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A : int = np.array([0.0_4_5_2, 0.0_3_9_0, 0.0_0_8_7, 0.0_3_5_0, 0.0_6_1_7, 0.0_3_6_4, 0.0_5_4_4, 0.0_5_2_3, 0.0_7_2_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self ) -> List[Any]:
A : Union[str, Any] = DDIMScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
A : str = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : Tuple = '''open neural network exchange'''
A : str = np.random.RandomState(0 )
A : Optional[int] = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCAmelCase , output_type='''np''' )
A : Tuple = output.images
A : List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A : Optional[Any] = np.array([0.2_8_6_7, 0.1_9_7_4, 0.1_4_8_1, 0.7_2_9_4, 0.7_2_5_1, 0.6_6_6_7, 0.4_1_9_4, 0.5_6_4_2, 0.6_4_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self ) -> Any:
A : int = LMSDiscreteScheduler.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
A : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=__UpperCAmelCase , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
sd_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : Optional[Any] = '''open neural network exchange'''
A : List[Any] = np.random.RandomState(0 )
A : Dict = sd_pipe([prompt] , guidance_scale=7.5 , num_inference_steps=10 , generator=__UpperCAmelCase , output_type='''np''' )
A : str = output.images
A : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A : Any = np.array([0.2_3_0_6, 0.1_9_5_9, 0.1_5_9_3, 0.6_5_4_9, 0.6_3_9_4, 0.5_4_0_8, 0.5_0_6_5, 0.6_0_1_0, 0.6_1_6_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def snake_case ( self ) -> Union[str, Any]:
A : Dict = 0
def test_callback_fn(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> None:
A : str = True
nonlocal number_of_steps
number_of_steps += 1
if step == 0:
assert latents.shape == (1, 4, 64, 64)
A : int = latents[0, -3:, -3:, -1]
A : List[Any] = np.array(
[-0.6_7_7_2, -0.3_8_3_5, -1.2_4_5_6, 0.1_9_0_5, -1.0_9_7_4, 0.6_9_6_7, -1.9_3_5_3, 0.0_1_7_8, 1.0_1_6_7] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
elif step == 5:
assert latents.shape == (1, 4, 64, 64)
A : Tuple = latents[0, -3:, -3:, -1]
A : List[str] = np.array(
[-0.3_3_5_1, 0.2_2_4_1, -0.1_8_3_7, -0.2_3_2_5, -0.6_5_7_7, 0.3_3_9_3, -0.0_2_4_1, 0.5_8_9_9, 1.3_8_7_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 1E-3
A : Optional[Any] = False
A : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
A : int = '''Andromeda galaxy in a bottle'''
A : Optional[Any] = np.random.RandomState(0 )
pipe(
prompt=__UpperCAmelCase , num_inference_steps=5 , guidance_scale=7.5 , generator=__UpperCAmelCase , callback=__UpperCAmelCase , callback_steps=1 , )
assert test_callback_fn.has_been_called
assert number_of_steps == 6
def snake_case ( self ) -> str:
A : Tuple = OnnxStableDiffusionPipeline.from_pretrained(
'''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , safety_checker=__UpperCAmelCase , feature_extractor=__UpperCAmelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
assert isinstance(__UpperCAmelCase , __UpperCAmelCase )
assert pipe.safety_checker is None
A : str = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCAmelCase )
A : Any = OnnxStableDiffusionPipeline.from_pretrained(__UpperCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
A : Optional[int] = pipe('''example prompt''' , num_inference_steps=2 ).images[0]
assert image is not None
| 542 |
'''simple docstring'''
from math import sqrt
def A ( UpperCamelCase_ : int ) -> int:
'''simple docstring'''
lowerCAmelCase__ = 0
for i in range(1 , int(sqrt(UpperCamelCase_ ) + 1 ) ):
if n % i == 0 and i != sqrt(UpperCamelCase_ ):
total += i + n // i
elif i == sqrt(UpperCamelCase_ ):
total += i
return total - n
def A ( UpperCamelCase_ : int = 1_00_00 ) -> int:
'''simple docstring'''
lowerCAmelCase__ = sum(
i
for i in range(1 , UpperCamelCase_ )
if sum_of_divisors(sum_of_divisors(UpperCamelCase_ ) ) == i and sum_of_divisors(UpperCamelCase_ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 48 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
_UpperCAmelCase : Tuple = logging.get_logger(__name__)
_UpperCAmelCase : Optional[int] = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
_UpperCAmelCase : List[Any] = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
_UpperCAmelCase : Tuple = {
"roberta-base": 5_12,
"roberta-large": 5_12,
"roberta-large-mnli": 5_12,
"distilroberta-base": 5_12,
"roberta-base-openai-detector": 5_12,
"roberta-large-openai-detector": 5_12,
}
class lowercase_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['input_ids', 'attention_mask']
__lowerCAmelCase = RobertaTokenizer
def __init__( self : Union[str, Any], UpperCamelCase__ : Any=None, UpperCamelCase__ : Union[str, Any]=None, UpperCamelCase__ : Tuple=None, UpperCamelCase__ : int="replace", UpperCamelCase__ : Optional[int]="<s>", UpperCamelCase__ : List[Any]="</s>", UpperCamelCase__ : Tuple="</s>", UpperCamelCase__ : int="<s>", UpperCamelCase__ : Optional[int]="<unk>", UpperCamelCase__ : Tuple="<pad>", UpperCamelCase__ : List[str]="<mask>", UpperCamelCase__ : Dict=False, UpperCamelCase__ : Union[str, Any]=True, **UpperCamelCase__ : int, ) -> Dict:
super().__init__(
UpperCamelCase__, UpperCamelCase__, tokenizer_file=UpperCamelCase__, errors=UpperCamelCase__, bos_token=UpperCamelCase__, eos_token=UpperCamelCase__, sep_token=UpperCamelCase__, cls_token=UpperCamelCase__, unk_token=UpperCamelCase__, pad_token=UpperCamelCase__, mask_token=UpperCamelCase__, add_prefix_space=UpperCamelCase__, trim_offsets=UpperCamelCase__, **UpperCamelCase__, )
_A = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', UpperCamelCase__ ) != add_prefix_space:
_A = getattr(UpperCamelCase__, pre_tok_state.pop('type' ) )
_A = add_prefix_space
_A = pre_tok_class(**UpperCamelCase__ )
_A = add_prefix_space
_A = 'post_processor'
_A = getattr(self.backend_tokenizer, UpperCamelCase__, UpperCamelCase__ )
if tokenizer_component_instance:
_A = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_A = tuple(state['sep'] )
if "cls" in state:
_A = tuple(state['cls'] )
_A = False
if state.get('add_prefix_space', UpperCamelCase__ ) != add_prefix_space:
_A = add_prefix_space
_A = True
if state.get('trim_offsets', UpperCamelCase__ ) != trim_offsets:
_A = trim_offsets
_A = True
if changes_to_apply:
_A = getattr(UpperCamelCase__, state.pop('type' ) )
_A = component_class(**UpperCamelCase__ )
setattr(self.backend_tokenizer, UpperCamelCase__, UpperCamelCase__ )
@property
def __UpperCAmelCase ( self : int ) -> Tuple:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def __UpperCAmelCase ( self : List[str], UpperCamelCase__ : Tuple ) -> List[str]:
_A = AddedToken(UpperCamelCase__, lstrip=UpperCamelCase__, rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__, UpperCamelCase__ ) else value
_A = value
def __UpperCAmelCase ( self : int, *UpperCamelCase__ : Union[str, Any], **UpperCamelCase__ : Any ) -> Optional[int]:
_A = kwargs.get('is_split_into_words', UpperCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase__, **UpperCamelCase__ )
def __UpperCAmelCase ( self : Any, *UpperCamelCase__ : Any, **UpperCamelCase__ : Tuple ) -> Optional[int]:
_A = kwargs.get('is_split_into_words', UpperCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase__, **UpperCamelCase__ )
def __UpperCAmelCase ( self : str, UpperCamelCase__ : str, UpperCamelCase__ : Optional[str] = None ) -> Tuple:
_A = self._tokenizer.model.save(UpperCamelCase__, name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : Optional[int], UpperCamelCase__ : Optional[int]=None ) -> Any:
_A = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __UpperCAmelCase ( self : Tuple, UpperCamelCase__ : List[int], UpperCamelCase__ : Optional[List[int]] = None ) -> Dict:
_A = [self.sep_token_id]
_A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 107 |
'''simple docstring'''
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def A ( UpperCamelCase_ : np.ndarray ) -> np.ndarray:
'''simple docstring'''
return input_array.reshape((input_array.size, 1) )
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.mean(1 )
# Centralize the data of class i
lowerCAmelCase__ = data - column_reshape(UpperCamelCase_ )
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(UpperCamelCase_ , centered_data.T )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T )
return covariance_sum / features.shape[1]
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
lowerCAmelCase__ = features.mean(1 )
lowerCAmelCase__ = np.nan
for i in range(UpperCamelCase_ ):
lowerCAmelCase__ = features[:, labels == i]
lowerCAmelCase__ = data.shape[1]
lowerCAmelCase__ = data.mean(1 )
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
lowerCAmelCase__ = device_data * np.dot(
column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ ) , (column_reshape(UpperCamelCase_ ) - column_reshape(UpperCamelCase_ )).T , )
return covariance_sum / features.shape[1]
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
if features.any():
lowerCAmelCase__ = features.mean(1 )
# Center the dataset
lowerCAmelCase__ = features - np.reshape(UpperCamelCase_ , (data_mean.size, 1) )
lowerCAmelCase__ = np.dot(UpperCamelCase_ , centered_data.T ) / features.shape[1]
lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.eigh(UpperCamelCase_ )
# Take all the columns in the reverse order (-1), and then takes only the first
lowerCAmelCase__ = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
lowerCAmelCase__ = np.dot(filtered_eigenvectors.T , UpperCamelCase_ )
logging.info("Principal Component Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
def A ( UpperCamelCase_ : np.ndarray , UpperCamelCase_ : np.ndarray , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> np.ndarray:
'''simple docstring'''
assert classes > dimensions
# Check if features have been already loaded
if features.any:
lowerCAmelCase__ ,lowerCAmelCase__ = eigh(
covariance_between_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , covariance_within_classes(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) , )
lowerCAmelCase__ = eigenvectors[:, ::-1][:, :dimensions]
lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = np.linalg.svd(UpperCamelCase_ )
lowerCAmelCase__ = svd_matrix[:, 0:dimensions]
lowerCAmelCase__ = np.dot(filtered_svd_matrix.T , UpperCamelCase_ )
logging.info("Linear Discriminant Analysis computed" )
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=UpperCamelCase_ )
logging.error("Dataset empty" )
raise AssertionError
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] )
lowerCAmelCase__ = np.array([0, 0, 0, 1, 1] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = linear_discriminant_analysis(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
if isinstance(UpperCamelCase_ , np.ndarray ):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes" )
assert error_info.type is AssertionError
def A ( ) -> None:
'''simple docstring'''
lowerCAmelCase__ = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] )
lowerCAmelCase__ = 2
lowerCAmelCase__ = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]] )
with pytest.raises(UpperCamelCase_ ) as error_info:
lowerCAmelCase__ = principal_component_analysis(UpperCamelCase_ , UpperCamelCase_ )
if not np.allclose(UpperCamelCase_ , UpperCamelCase_ ):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 0 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __SCREAMING_SNAKE_CASE :
def __init__( self : List[Any] , A : Optional[Any] , A : str=1_3 , A : List[str]=7 , A : Tuple=True , A : Tuple=True , A : str=True , A : int=True , A : int=9_9 , A : List[str]=[1, 1, 2] , A : Dict=1 , A : Tuple=3_2 , A : Any=4 , A : Tuple=8 , A : Optional[Any]=3_7 , A : Tuple="gelu_new" , A : Union[str, Any]=0.1 , A : List[str]=0.1 , A : Tuple=0.0 , A : int=5_1_2 , A : Optional[int]=3 , A : List[str]=0.02 , A : Dict=3 , A : List[Any]=4 , A : Any=None , A : Dict=False , ) ->Union[str, Any]:
lowerCamelCase__ : Tuple = parent
lowerCamelCase__ : str = batch_size
lowerCamelCase__ : List[Any] = seq_length
lowerCamelCase__ : Optional[int] = is_training
lowerCamelCase__ : Dict = use_input_mask
lowerCamelCase__ : Dict = use_token_type_ids
lowerCamelCase__ : int = use_labels
lowerCamelCase__ : Dict = vocab_size
lowerCamelCase__ : Optional[Any] = block_sizes
lowerCamelCase__ : Union[str, Any] = num_decoder_layers
lowerCamelCase__ : int = d_model
lowerCamelCase__ : List[str] = n_head
lowerCamelCase__ : Optional[Any] = d_head
lowerCamelCase__ : Optional[Any] = d_inner
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : int = hidden_dropout
lowerCamelCase__ : Dict = attention_dropout
lowerCamelCase__ : Union[str, Any] = activation_dropout
lowerCamelCase__ : Any = max_position_embeddings
lowerCamelCase__ : List[Any] = type_vocab_size
lowerCamelCase__ : Union[str, Any] = 2
lowerCamelCase__ : int = num_labels
lowerCamelCase__ : Dict = num_choices
lowerCamelCase__ : str = scope
lowerCamelCase__ : List[Any] = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCamelCase__ : Tuple = n_head
# Used in the tests to check the size of the first hidden state
lowerCamelCase__ : Optional[Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCamelCase__ : Any = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCamelCase__ : Dict = self.num_hidden_layers + 2
def __lowerCamelCase ( self : List[Any] ) ->int:
lowerCamelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Optional[int] = None
if self.use_input_mask:
lowerCamelCase__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase__ : List[Any] = None
if self.use_token_type_ids:
lowerCamelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Optional[int] = None
lowerCamelCase__ : int = None
lowerCamelCase__ : Optional[int] = None
if self.use_labels:
lowerCamelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : int = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __lowerCamelCase ( self : Optional[Any] , A : Optional[int] , A : Optional[int] , A : Union[str, Any] , A : Any , A : List[Any] , A : Optional[int] , A : str , ) ->Optional[int]:
lowerCamelCase__ : Union[str, Any] = TFFunnelModel(config=A )
lowerCamelCase__ : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase__ : Optional[int] = model(A )
lowerCamelCase__ : Union[str, Any] = [input_ids, input_mask]
lowerCamelCase__ : Optional[Any] = model(A )
lowerCamelCase__ : Union[str, Any] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCamelCase__ : Optional[int] = False
lowerCamelCase__ : List[Any] = TFFunnelModel(config=A )
lowerCamelCase__ : Dict = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCamelCase__ : Dict = False
lowerCamelCase__ : Optional[int] = TFFunnelModel(config=A )
lowerCamelCase__ : Any = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __lowerCamelCase ( self : Union[str, Any] , A : str , A : Any , A : List[Any] , A : Tuple , A : List[Any] , A : int , A : int , ) ->str:
lowerCamelCase__ : str = TFFunnelBaseModel(config=A )
lowerCamelCase__ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase__ : Any = model(A )
lowerCamelCase__ : Any = [input_ids, input_mask]
lowerCamelCase__ : Any = model(A )
lowerCamelCase__ : Union[str, Any] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCamelCase__ : Optional[Any] = False
lowerCamelCase__ : Optional[Any] = TFFunnelBaseModel(config=A )
lowerCamelCase__ : int = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCamelCase__ : Any = False
lowerCamelCase__ : Any = TFFunnelBaseModel(config=A )
lowerCamelCase__ : int = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __lowerCamelCase ( self : str , A : Any , A : Union[str, Any] , A : Dict , A : List[Any] , A : str , A : Optional[Any] , A : List[str] , ) ->Any:
lowerCamelCase__ : List[Any] = TFFunnelForPreTraining(config=A )
lowerCamelCase__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase__ : Union[str, Any] = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self : str , A : int , A : Union[str, Any] , A : Optional[Any] , A : Dict , A : Dict , A : Dict , A : Dict , ) ->Dict:
lowerCamelCase__ : str = TFFunnelForMaskedLM(config=A )
lowerCamelCase__ : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase__ : Any = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self : Optional[int] , A : List[Any] , A : Optional[Any] , A : Any , A : Tuple , A : List[Any] , A : List[Any] , A : Any , ) ->Optional[Any]:
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Optional[int] = TFFunnelForSequenceClassification(config=A )
lowerCamelCase__ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase__ : str = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self : Tuple , A : Optional[Any] , A : Optional[Any] , A : Optional[Any] , A : Any , A : Any , A : List[str] , A : List[str] , ) ->Tuple:
lowerCamelCase__ : Optional[Any] = self.num_choices
lowerCamelCase__ : Tuple = TFFunnelForMultipleChoice(config=A )
lowerCamelCase__ : int = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ : Optional[int] = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ : Dict = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase__ : List[Any] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowerCamelCase__ : int = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self : Tuple , A : Dict , A : Any , A : Union[str, Any] , A : int , A : int , A : Optional[int] , A : str , ) ->Union[str, Any]:
lowerCamelCase__ : List[str] = self.num_labels
lowerCamelCase__ : Optional[int] = TFFunnelForTokenClassification(config=A )
lowerCamelCase__ : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase__ : List[Any] = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self : Union[str, Any] , A : Tuple , A : Optional[Any] , A : str , A : Dict , A : Optional[int] , A : str , A : List[str] , ) ->int:
lowerCamelCase__ : Union[str, Any] = TFFunnelForQuestionAnswering(config=A )
lowerCamelCase__ : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowerCamelCase__ : Optional[Any] = model(A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self : Union[str, Any] ) ->List[Any]:
lowerCamelCase__ : List[Any] = self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : int = config_and_inputs
lowerCamelCase__ : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,unittest.TestCase ):
_UpperCAmelCase : int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
_UpperCAmelCase : Any = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCAmelCase : str = False
_UpperCAmelCase : Any = False
def __lowerCamelCase ( self : Union[str, Any] ) ->str:
lowerCamelCase__ : Optional[Any] = TFFunnelModelTester(self )
lowerCamelCase__ : Optional[int] = ConfigTester(self , config_class=A )
def __lowerCamelCase ( self : Optional[int] ) ->str:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self : int ) ->Any:
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def __lowerCamelCase ( self : Optional[int] ) ->Dict:
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*A )
def __lowerCamelCase ( self : Tuple ) ->int:
lowerCamelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A )
def __lowerCamelCase ( self : Any ) ->Any:
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
def __lowerCamelCase ( self : Tuple ) ->List[str]:
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
@require_tf
class __SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ,unittest.TestCase ):
_UpperCAmelCase : Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
_UpperCAmelCase : int = False
_UpperCAmelCase : List[Any] = False
def __lowerCamelCase ( self : str ) ->Optional[int]:
lowerCamelCase__ : Tuple = TFFunnelModelTester(self , base=A )
lowerCamelCase__ : Union[str, Any] = ConfigTester(self , config_class=A )
def __lowerCamelCase ( self : Dict ) ->Tuple:
self.config_tester.run_common_tests()
def __lowerCamelCase ( self : Dict ) ->Optional[Any]:
lowerCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*A )
def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[int]:
lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def __lowerCamelCase ( self : Optional[Any] ) ->Any:
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*A )
| 315 |
'''simple docstring'''
def A ( UpperCamelCase_ : str , UpperCamelCase_ : int ) -> list:
'''simple docstring'''
lowerCAmelCase__ = word.split()
def justify(UpperCamelCase_ : list , UpperCamelCase_ : int , UpperCamelCase_ : int ) -> str:
lowerCAmelCase__ = max_width - width
lowerCAmelCase__ = len(UpperCamelCase_ )
if len(UpperCamelCase_ ) == 1:
# if there is only word in line
# just insert overall_spaces_count for the remainder of line
return line[0] + " " * overall_spaces_count
else:
lowerCAmelCase__ = words_count - 1
# num_spaces_between_words_list[i] : tells you to insert
# num_spaces_between_words_list[i] spaces
# after word on line[i]
lowerCAmelCase__ = spaces_to_insert_between_words * [
overall_spaces_count // spaces_to_insert_between_words
]
lowerCAmelCase__ = (
overall_spaces_count % spaces_to_insert_between_words
)
# distribute spaces via round robin to the left words
for i in range(UpperCamelCase_ ):
num_spaces_between_words_list[i] += 1
lowerCAmelCase__ = []
for i in range(UpperCamelCase_ ):
# add the word
aligned_words_list.append(line[i] )
# add the spaces to insert
aligned_words_list.append(num_spaces_between_words_list[i] * " " )
# just add the last word to the sentence
aligned_words_list.append(line[-1] )
# join the aligned words list to form a justified line
return "".join(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
lowerCAmelCase__ = 0
for word in words:
if width + len(UpperCamelCase_ ) + len(UpperCamelCase_ ) <= max_width:
# keep adding words until we can fill out max_width
# width = sum of length of all words (without overall_spaces_count)
# len(word) = length of current word
# len(line) = number of overall_spaces_count to insert between words
line.append(UpperCamelCase_ )
width += len(UpperCamelCase_ )
else:
# justify the line and add it to result
answer.append(justify(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) )
# reset new line and new width
lowerCAmelCase__ ,lowerCAmelCase__ = [word], len(UpperCamelCase_ )
lowerCAmelCase__ = max_width - width - len(UpperCamelCase_ )
answer.append(" ".join(UpperCamelCase_ ) + (remaining_spaces + 1) * " " )
return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
| 48 | 0 |
import re
from filelock import FileLock
try:
import nltk
snake_case__ : Union[str, Any] = True
except (ImportError, ModuleNotFoundError):
snake_case__ : Tuple = False
if NLTK_AVAILABLE:
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def lowerCamelCase__ ( _lowerCamelCase ) ->str:
re.sub("<n>" , "" , UpperCamelCase_ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase_ ) )
| 408 |
'''simple docstring'''
import argparse
import json
import os
import sys
import tempfile
import unittest
from argparse import Namespace
from dataclasses import dataclass, field
from enum import Enum
from pathlib import Path
from typing import List, Literal, Optional
import yaml
from transformers import HfArgumentParser, TrainingArguments
from transformers.hf_argparser import make_choice_type_function, string_to_bool
# Since Python 3.10, we can use the builtin `|` operator for Union types
# See PEP 604: https://peps.python.org/pep-0604
UpperCAmelCase__ : str = sys.version_info >= (3, 10)
def A ( UpperCamelCase_ : Any=None , UpperCamelCase_ : List[Any]=None ) -> Optional[int]:
'''simple docstring'''
return field(default_factory=lambda: default , metadata=UpperCamelCase_ )
@dataclass
class A :
snake_case__ :int
snake_case__ :float
snake_case__ :str
snake_case__ :bool
@dataclass
class A :
snake_case__ :int = 42
snake_case__ :str = field(default='toto' , metadata={'help': 'help message'} )
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :Optional[bool] = None
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Any = 'titi'
snake_case__ :Optional[int] = 'toto'
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'titi'
snake_case__ :str = 'toto'
snake_case__ :int = 42
@dataclass
class A :
snake_case__ :BasicEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.foo )
@dataclass
class A :
snake_case__ :MixedTypeEnum = "toto"
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = MixedTypeEnum(self.foo )
@dataclass
class A :
snake_case__ :Optional[int] = None
snake_case__ :Optional[float] = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :Optional[str] = None
snake_case__ :Optional[List[str]] = list_field(default=[] )
snake_case__ :Optional[List[int]] = list_field(default=[] )
@dataclass
class A :
snake_case__ :List[int] = list_field(default=[] )
snake_case__ :List[int] = list_field(default=[1, 2, 3] )
snake_case__ :List[str] = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
snake_case__ :List[float] = list_field(default=[0.1, 0.2, 0.3] )
@dataclass
class A :
snake_case__ :List[int] = field()
snake_case__ :str = field()
snake_case__ :BasicEnum = field()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = BasicEnum(self.required_enum )
@dataclass
class A :
snake_case__ :int
snake_case__ :"BasicEnum" = field()
snake_case__ :"Optional[bool]" = None
snake_case__ :"str" = field(default='toto' , metadata={'help': 'help message'} )
snake_case__ :"List[str]" = list_field(default=['Hallo', 'Bonjour', 'Hello'] )
if is_python_no_less_than_3_10:
@dataclass
class A :
snake_case__ :bool = False
snake_case__ :bool = True
snake_case__ :bool | None = None
@dataclass
class A :
snake_case__ :int | None = None
snake_case__ :float | None = field(default=SCREAMING_SNAKE_CASE__ , metadata={'help': 'help message'} )
snake_case__ :str | None = None
snake_case__ :list[str] | None = list_field(default=[] )
snake_case__ :list[int] | None = list_field(default=[] )
class A ( unittest.TestCase ):
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : argparse.ArgumentParser , __magic_name__ : argparse.ArgumentParser ):
"""simple docstring"""
self.assertEqual(len(a._actions ) , len(b._actions ) )
for x, y in zip(a._actions , b._actions ):
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
lowerCAmelCase__ = {k: v for k, v in vars(__magic_name__ ).items() if k != "container"}
# Choices with mixed type have custom function as "type"
# So we need to compare results directly for equality
if xx.get("choices" , __magic_name__ ) and yy.get("choices" , __magic_name__ ):
for expected_choice in yy["choices"] + xx["choices"]:
self.assertEqual(xx["type"](__magic_name__ ) , yy["type"](__magic_name__ ) )
del xx["type"], yy["type"]
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--bar" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--baz" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--flag" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = ["--foo", "1", "--baz", "quux", "--bar", "0.5"]
((lowerCAmelCase__) ,) = parser.parse_args_into_dataclasses(__magic_name__ , look_for_args_file=__magic_name__ )
self.assertFalse(example.flag )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=42 , type=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
expected.add_argument("--baz" , type=__magic_name__ , default=__magic_name__ , const=__magic_name__ , nargs="?" )
# A boolean no_* argument always has to come after its "default: True" regular counter-part
# and its default must be set to False
expected.add_argument("--no_baz" , action="store_false" , default=__magic_name__ , dest="baz" )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
lowerCAmelCase__ = [WithDefaultBoolExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--no_baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "--baz"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "True", "--baz", "True", "--opt", "True"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
lowerCAmelCase__ = parser.parse_args(["--foo", "False", "--baz", "False", "--opt", "False"] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , baz=__magic_name__ , opt=__magic_name__ ) )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=["titi", "toto", 42] , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses([] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.toto )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "titi"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.titi )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
lowerCAmelCase__ = parser.parse_args_into_dataclasses(["--foo", "42"] )[0]
self.assertEqual(enum_ex.foo , MixedTypeEnum.fourtytwo )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
@dataclass
class A :
snake_case__ :Literal["titi", "toto", 42] = "toto"
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument(
"--foo" , default="toto" , choices=("titi", "toto", 42) , type=make_choice_type_function(["titi", "toto", 42] ) , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(args.foo , "toto" )
lowerCAmelCase__ = parser.parse_args(["--foo", "titi"] )
self.assertEqual(args.foo , "titi" )
lowerCAmelCase__ = parser.parse_args(["--foo", "42"] )
self.assertEqual(args.foo , 42 )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo_int" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--bar_int" , nargs="+" , default=[1, 2, 3] , type=__magic_name__ )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
expected.add_argument("--foo_float" , nargs="+" , default=[0.1, 0.2, 0.3] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(
__magic_name__ , Namespace(foo_int=[] , bar_int=[1, 2, 3] , foo_str=["Hallo", "Bonjour", "Hello"] , foo_float=[0.1, 0.2, 0.3] ) , )
lowerCAmelCase__ = parser.parse_args("--foo_int 1 --bar_int 2 3 --foo_str a b c --foo_float 0.1 0.7".split() )
self.assertEqual(__magic_name__ , Namespace(foo_int=[1] , bar_int=[2, 3] , foo_str=["a", "b", "c"] , foo_float=[0.1, 0.7] ) )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--bar" , default=__magic_name__ , type=__magic_name__ , help="help message" )
expected.add_argument("--baz" , default=__magic_name__ , type=__magic_name__ )
expected.add_argument("--ces" , nargs="+" , default=[] , type=__magic_name__ )
expected.add_argument("--des" , nargs="+" , default=[] , type=__magic_name__ )
lowerCAmelCase__ = [OptionalExample]
if is_python_no_less_than_3_10:
dataclass_types.append(__magic_name__ )
for dataclass_type in dataclass_types:
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_args([] )
self.assertEqual(__magic_name__ , Namespace(foo=__magic_name__ , bar=__magic_name__ , baz=__magic_name__ , ces=[] , des=[] ) )
lowerCAmelCase__ = parser.parse_args("--foo 12 --bar 3.14 --baz 42 --ces a b c --des 1 2 3".split() )
self.assertEqual(__magic_name__ , Namespace(foo=12 , bar=3.14 , baz="42" , ces=["a", "b", "c"] , des=[1, 2, 3] ) )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--required_list" , nargs="+" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument("--required_str" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = argparse.ArgumentParser()
expected.add_argument("--foo" , type=__magic_name__ , required=__magic_name__ )
expected.add_argument(
"--required_enum" , type=make_choice_type_function(["titi", "toto"] ) , choices=["titi", "toto"] , required=__magic_name__ , )
expected.add_argument("--opt" , type=__magic_name__ , default=__magic_name__ )
expected.add_argument("--baz" , default="toto" , type=__magic_name__ , help="help message" )
expected.add_argument("--foo_str" , nargs="+" , default=["Hallo", "Bonjour", "Hello"] , type=__magic_name__ )
self.argparsersEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
lowerCAmelCase__ = parser.parse_dict(__magic_name__ )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
"extra": 42,
}
self.assertRaises(__magic_name__ , parser.parse_dict , __magic_name__ , allow_extra_keys=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_json" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".json" , "w+" ) as f:
json.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".json" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
lowerCAmelCase__ = {
"foo": 12,
"bar": 3.14,
"baz": "42",
"flag": True,
}
with tempfile.TemporaryDirectory() as tmp_dir:
lowerCAmelCase__ = os.path.join(__magic_name__ , "temp_yaml" )
os.mkdir(__magic_name__ )
with open(temp_local_path + ".yaml" , "w+" ) as f:
yaml.dump(__magic_name__ , __magic_name__ )
lowerCAmelCase__ = parser.parse_yaml_file(Path(temp_local_path + ".yaml" ) )[0]
lowerCAmelCase__ = BasicExample(**__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = HfArgumentParser(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
| 48 | 0 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def __a ( __UpperCAmelCase : Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase_ : str = fname.split(os.path.sep )[-1]
return re.search(R"^(.*)_\d+\.jpg$" , UpperCamelCase_ ).groups()[0]
class snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
def __init__( self : List[Any] , __magic_name__ : Dict , __magic_name__ : Any=None , __magic_name__ : List[Any]=None ) -> str:
lowerCamelCase_ : List[Any] = file_names
lowerCamelCase_ : List[str] = image_transform
lowerCamelCase_ : Optional[int] = label_to_id
def __len__( self : Any ) -> Tuple:
return len(self.file_names )
def __getitem__( self : List[Any] , __magic_name__ : List[Any] ) -> Union[str, Any]:
lowerCamelCase_ : Dict = self.file_names[idx]
lowerCamelCase_ : int = PIL.Image.open(__magic_name__ )
lowerCamelCase_ : Dict = raw_image.convert("RGB" )
if self.image_transform is not None:
lowerCamelCase_ : Tuple = self.image_transform(__magic_name__ )
lowerCamelCase_ : List[Any] = extract_label(__magic_name__ )
if self.label_to_id is not None:
lowerCamelCase_ : Optional[Any] = self.label_to_id[label]
return {"image": image, "label": label}
def __a ( __UpperCAmelCase : Tuple , __UpperCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
if args.with_tracking:
lowerCamelCase_ : Optional[Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
lowerCamelCase_ : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase_ : int = config["lr"]
lowerCamelCase_ : int = int(config["num_epochs"] )
lowerCamelCase_ : Any = int(config["seed"] )
lowerCamelCase_ : int = int(config["batch_size"] )
lowerCamelCase_ : List[str] = config["image_size"]
if not isinstance(UpperCamelCase_ , (list, tuple) ):
lowerCamelCase_ : Optional[Any] = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
lowerCamelCase_ : List[str] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
lowerCamelCase_ : Tuple = int(args.checkpointing_steps )
else:
raise ValueError(
f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
else:
lowerCamelCase_ : str = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
lowerCamelCase_ : str = os.path.split(UpperCamelCase_ )[-1].split("." )[0]
accelerator.init_trackers(UpperCamelCase_ , UpperCamelCase_ )
# Grab all the image filenames
lowerCamelCase_ : int = [os.path.join(args.data_dir , UpperCamelCase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
lowerCamelCase_ : int = [extract_label(UpperCamelCase_ ) for fname in file_names]
lowerCamelCase_ : Union[str, Any] = list(set(UpperCamelCase_ ) )
id_to_label.sort()
lowerCamelCase_ : Dict = {lbl: i for i, lbl in enumerate(UpperCamelCase_ )}
# Set the seed before splitting the data.
np.random.seed(UpperCamelCase_ )
torch.manual_seed(UpperCamelCase_ )
torch.cuda.manual_seed_all(UpperCamelCase_ )
# Split our filenames between train and validation
lowerCamelCase_ : Optional[Any] = np.random.permutation(len(UpperCamelCase_ ) )
lowerCamelCase_ : int = int(0.8 * len(UpperCamelCase_ ) )
lowerCamelCase_ : int = random_perm[:cut]
lowerCamelCase_ : int = random_perm[cut:]
# For training we use a simple RandomResizedCrop
lowerCamelCase_ : str = Compose([RandomResizedCrop(UpperCamelCase_ , scale=(0.5, 1.0) ), ToTensor()] )
lowerCamelCase_ : Optional[Any] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=UpperCamelCase_ , label_to_id=UpperCamelCase_ )
# For evaluation, we use a deterministic Resize
lowerCamelCase_ : Tuple = Compose([Resize(UpperCamelCase_ ), ToTensor()] )
lowerCamelCase_ : Any = PetsDataset([file_names[i] for i in eval_split] , image_transform=UpperCamelCase_ , label_to_id=UpperCamelCase_ )
# Instantiate dataloaders.
lowerCamelCase_ : Optional[int] = DataLoader(UpperCamelCase_ , shuffle=UpperCamelCase_ , batch_size=UpperCamelCase_ , num_workers=4 )
lowerCamelCase_ : List[Any] = DataLoader(UpperCamelCase_ , shuffle=UpperCamelCase_ , batch_size=UpperCamelCase_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase_ : Tuple = create_model("resnet50d" , pretrained=UpperCamelCase_ , num_classes=len(UpperCamelCase_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCamelCase_ : Union[str, Any] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
lowerCamelCase_ : Union[str, Any] = False
for param in model.get_classifier().parameters():
lowerCamelCase_ : List[Any] = True
# We normalize the batches of images to be a bit faster.
lowerCamelCase_ : str = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
lowerCamelCase_ : List[Any] = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
lowerCamelCase_ : List[str] = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
lowerCamelCase_ : Union[str, Any] = OneCycleLR(optimizer=UpperCamelCase_ , max_lr=UpperCamelCase_ , epochs=UpperCamelCase_ , steps_per_epoch=len(UpperCamelCase_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ : Optional[Any] = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase_ : Any = 0
# We also need to keep track of the starting epoch so files are named properly
lowerCamelCase_ : Tuple = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}" )
accelerator.load_state(args.resume_from_checkpoint )
lowerCamelCase_ : Dict = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
lowerCamelCase_ : List[Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
lowerCamelCase_ : Tuple = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
lowerCamelCase_ : int = os.path.splitext(UpperCamelCase_ )[0]
if "epoch" in training_difference:
lowerCamelCase_ : Optional[int] = int(training_difference.replace("epoch_" , "" ) ) + 1
lowerCamelCase_ : Dict = None
else:
lowerCamelCase_ : Tuple = int(training_difference.replace("step_" , "" ) )
lowerCamelCase_ : str = resume_step // len(UpperCamelCase_ )
resume_step -= starting_epoch * len(UpperCamelCase_ )
# Now we train the model
for epoch in range(UpperCamelCase_ , UpperCamelCase_ ):
model.train()
if args.with_tracking:
lowerCamelCase_ : Tuple = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
lowerCamelCase_ : List[str] = accelerator.skip_first_batches(UpperCamelCase_ , UpperCamelCase_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
lowerCamelCase_ : List[str] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCamelCase_ : Any = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCamelCase_ : str = (batch["image"] - mean) / std
lowerCamelCase_ : Optional[Any] = model(UpperCamelCase_ )
lowerCamelCase_ : int = torch.nn.functional.cross_entropy(UpperCamelCase_ , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(UpperCamelCase_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
lowerCamelCase_ : List[Any] = f"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
lowerCamelCase_ : Dict = os.path.join(args.output_dir , UpperCamelCase_ )
accelerator.save_state(UpperCamelCase_ )
model.eval()
lowerCamelCase_ : Optional[Any] = 0
lowerCamelCase_ : List[Any] = 0
for step, batch in enumerate(UpperCamelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCamelCase_ : Dict = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCamelCase_ : List[Any] = (batch["image"] - mean) / std
with torch.no_grad():
lowerCamelCase_ : List[str] = model(UpperCamelCase_ )
lowerCamelCase_ : Optional[int] = outputs.argmax(dim=-1 )
lowerCamelCase_ , lowerCamelCase_ : List[Any] = accelerator.gather_for_metrics((predictions, batch["label"]) )
lowerCamelCase_ : Optional[int] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
lowerCamelCase_ : Union[str, Any] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(UpperCamelCase_ ),
"epoch": epoch,
} , step=UpperCamelCase_ , )
if checkpointing_steps == "epoch":
lowerCamelCase_ : str = f"epoch_{epoch}"
if args.output_dir is not None:
lowerCamelCase_ : List[str] = os.path.join(args.output_dir , UpperCamelCase_ )
accelerator.save_state(UpperCamelCase_ )
if args.with_tracking:
accelerator.end_training()
def __a ( ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : List[Any] = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument("--data_dir" , required=UpperCamelCase_ , help="The data folder on disk." )
parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" , type=UpperCamelCase_ , default=UpperCamelCase_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" , type=UpperCamelCase_ , default=UpperCamelCase_ , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
parser.add_argument(
"--output_dir" , type=UpperCamelCase_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=UpperCamelCase_ , default=UpperCamelCase_ , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=UpperCamelCase_ , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
lowerCamelCase_ : List[str] = parser.parse_args()
lowerCamelCase_ : Union[str, Any] = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
training_function(UpperCamelCase_ , UpperCamelCase_ )
if __name__ == "__main__":
main()
| 488 |
'''simple docstring'''
import sys
from collections import defaultdict
class A :
def __init__( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = []
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : List[Any] ):
"""simple docstring"""
return self.node_position[vertex]
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = pos
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : int , __magic_name__ : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : List[str] ):
"""simple docstring"""
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
lowerCAmelCase__ = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
lowerCAmelCase__ = 2 * start + 1
else:
lowerCAmelCase__ = 2 * start + 2
if heap[smallest_child] < heap[start]:
lowerCAmelCase__ ,lowerCAmelCase__ = heap[smallest_child], positions[smallest_child]
lowerCAmelCase__ ,lowerCAmelCase__ = (
heap[start],
positions[start],
)
lowerCAmelCase__ ,lowerCAmelCase__ = temp, tempa
lowerCAmelCase__ = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __magic_name__ )
self.top_to_bottom(__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : List[str] , __magic_name__ : List[str] ):
"""simple docstring"""
lowerCAmelCase__ = position[index]
while index != 0:
lowerCAmelCase__ = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
lowerCAmelCase__ = heap[parent]
lowerCAmelCase__ = position[parent]
self.set_position(position[parent] , __magic_name__ )
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , __magic_name__ )
break
lowerCAmelCase__ = parent
else:
lowerCAmelCase__ = val
lowerCAmelCase__ = temp
self.set_position(__magic_name__ , 0 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : int ):
"""simple docstring"""
lowerCAmelCase__ = len(__magic_name__ ) // 2 - 1
for i in range(__magic_name__ , -1 , -1 ):
self.top_to_bottom(__magic_name__ , __magic_name__ , len(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Union[str, Any] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = positions[0]
lowerCAmelCase__ = sys.maxsize
self.top_to_bottom(__magic_name__ , 0 , len(__magic_name__ ) , __magic_name__ )
return temp
def A ( UpperCamelCase_ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = Heap()
lowerCAmelCase__ = [0] * len(UpperCamelCase_ )
lowerCAmelCase__ = [-1] * len(UpperCamelCase_ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
lowerCAmelCase__ = [] # Heap of Distance of vertices from their neighboring vertex
lowerCAmelCase__ = []
for vertex in range(len(UpperCamelCase_ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase_ )
heap.node_position.append(UpperCamelCase_ )
lowerCAmelCase__ = []
lowerCAmelCase__ = 1
lowerCAmelCase__ = sys.maxsize
for neighbor, distance in adjacency_list[0]:
lowerCAmelCase__ = 0
lowerCAmelCase__ = distance
heap.heapify(UpperCamelCase_ , UpperCamelCase_ )
for _ in range(1 , len(UpperCamelCase_ ) ):
lowerCAmelCase__ = heap.delete_minimum(UpperCamelCase_ , UpperCamelCase_ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
lowerCAmelCase__ = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase_ )]
):
lowerCAmelCase__ = distance
heap.bottom_to_top(
UpperCamelCase_ , heap.get_position(UpperCamelCase_ ) , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
UpperCAmelCase__ : Optional[int] = int(input("Enter number of edges: ").strip())
UpperCAmelCase__ : str = defaultdict(list)
for _ in range(edges_number):
UpperCAmelCase__ : int = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 48 | 0 |
"""simple docstring"""
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__UpperCamelCase : str = abspath(join(dirname(__file__), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def __SCREAMING_SNAKE_CASE ( A_ ):
config.addinivalue_line(
'''markers''' , '''is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested''' )
config.addinivalue_line(
'''markers''' , '''is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested''' )
config.addinivalue_line('''markers''' , '''is_pipeline_test: mark test to run only when pipelines are tested''' )
config.addinivalue_line('''markers''' , '''is_staging_test: mark test to run only in the staging environment''' )
config.addinivalue_line('''markers''' , '''accelerate_tests: mark test that require accelerate''' )
config.addinivalue_line('''markers''' , '''tool_tests: mark the tool tests that are run on their specific schedule''' )
def __SCREAMING_SNAKE_CASE ( A_ ):
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(UpperCamelCase_ )
def __SCREAMING_SNAKE_CASE ( A_ ):
from transformers.testing_utils import pytest_terminal_summary_main
lowerCAmelCase__ : Tuple = terminalreporter.config.getoption('''--make-reports''' )
if make_reports:
pytest_terminal_summary_main(UpperCamelCase_ , id=UpperCamelCase_ )
def __SCREAMING_SNAKE_CASE ( A_ , A_ ):
if exitstatus == 5:
lowerCAmelCase__ : int = 0
# Doctest custom flag to ignore output.
__UpperCamelCase : List[str] = doctest.register_optionflag('''IGNORE_RESULT''')
__UpperCamelCase : Optional[Any] = doctest.OutputChecker
class SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __lowerCAmelCase ( self : List[str] ,lowercase_ : Dict ,lowercase_ : Optional[int] ,lowercase_ : Tuple ):
if IGNORE_RESULT & optionflags:
return True
return OutputChecker.check_output(self ,lowercase_ ,lowercase_ ,lowercase_ )
__UpperCamelCase : Any = CustomOutputChecker
__UpperCamelCase : str = HfDoctestModule
__UpperCamelCase : Optional[int] = HfDocTestParser
| 450 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
UpperCAmelCase__ : Tuple = get_tests_dir("fixtures/test_sentencepiece.model")
if is_sentencepiece_available():
import sentencepiece as sp
UpperCAmelCase__ : Tuple = 5
UpperCAmelCase__ : List[Any] = 10
@require_sentencepiece
@require_tokenizers
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Tuple = SpeechaTextTokenizer
snake_case__ :Dict = False
snake_case__ :Optional[int] = True
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
super().setUp()
lowerCAmelCase__ = sp.SentencePieceProcessor()
spm_model.Load(__magic_name__ )
lowerCAmelCase__ = ["<s>", "<pad>", "</s>", "<unk>"]
vocab += [spm_model.IdToPiece(id_ ) for id_ in range(len(__magic_name__ ) )]
lowerCAmelCase__ = dict(zip(__magic_name__ , range(len(__magic_name__ ) ) ) )
lowerCAmelCase__ = Path(self.tmpdirname )
save_json(__magic_name__ , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(__magic_name__ , save_dir / VOCAB_FILES_NAMES["spm_file"] )
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = "<pad>"
lowerCAmelCase__ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__magic_name__ ) , __magic_name__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__magic_name__ ) , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__magic_name__ ) , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1001 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(self.tmpdirname )
lowerCAmelCase__ = tokenizer.tokenize("This is a test" )
self.assertListEqual(__magic_name__ , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__magic_name__ ) , [289, 50, 14, 174, 386] , )
lowerCAmelCase__ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."] , )
lowerCAmelCase__ = tokenizer.convert_tokens_to_ids(__magic_name__ )
self.assertListEqual(__magic_name__ , [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8] )
lowerCAmelCase__ = tokenizer.convert_ids_to_tokens(__magic_name__ )
self.assertListEqual(
__magic_name__ , [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."] , )
@slow
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = {"input_ids": [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__magic_name__ , model_name="facebook/s2t-small-mustc-en-de-st" , revision="a14f04cf0776c02f62a8cb800cf7909e15ea23ad" , )
@require_sentencepiece
class A ( unittest.TestCase ):
snake_case__ :Union[str, Any] = 'valhalla/s2t_mustc_multilinguial_medium'
snake_case__ :Tuple = 'C\'est trop cool'
snake_case__ :List[str] = 'Esto es genial'
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name )
return cls
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.assertEqual(self.tokenizer.lang_code_to_id["pt"] , 4 )
self.assertEqual(self.tokenizer.lang_code_to_id["ru"] , 6 )
self.assertEqual(self.tokenizer.lang_code_to_id["it"] , 9 )
self.assertEqual(self.tokenizer.lang_code_to_id["de"] , 11 )
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
self.assertEqual(self.tokenizer.vocab_size , 10000 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
self.assertIn(__magic_name__ , self.tokenizer.all_special_ids )
lowerCAmelCase__ = [ES_CODE, 4, 1601, 47, 7647, 2]
lowerCAmelCase__ = self.tokenizer.decode(__magic_name__ , skip_special_tokens=__magic_name__ )
lowerCAmelCase__ = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
self.assertNotIn(self.tokenizer.eos_token , __magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
lowerCAmelCase__ = self.tokenizer(self.french_text ).input_ids
self.assertEqual(encoded[0] , __magic_name__ )
self.assertEqual(encoded[-1] , self.tokenizer.eos_token_id )
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = "fr"
self.assertListEqual(self.tokenizer.prefix_tokens , [FR_CODE] )
lowerCAmelCase__ = "es"
self.assertListEqual(self.tokenizer.prefix_tokens , [ES_CODE] )
| 48 | 0 |
from __future__ import annotations
_SCREAMING_SNAKE_CASE = "Muhammad Umer Farooq"
_SCREAMING_SNAKE_CASE = "MIT"
_SCREAMING_SNAKE_CASE = "1.0.0"
_SCREAMING_SNAKE_CASE = "Muhammad Umer Farooq"
_SCREAMING_SNAKE_CASE = "[email protected]"
_SCREAMING_SNAKE_CASE = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class a ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ ) -> List[Any]:
super().__init__()
_A = []
_A = domain
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
_A = parse.urljoin(self.domain , lowerCAmelCase_ )
self.urls.append(lowerCAmelCase_ )
def snake_case ( snake_case__ :str) -> str:
return ".".join(get_sub_domain_name(UpperCamelCase_).split(""".""")[-2:])
def snake_case ( snake_case__ :str) -> str:
return parse.urlparse(UpperCamelCase_).netloc
def snake_case ( snake_case__ :str = "https://github.com") -> list[str]:
_A = get_domain_name(UpperCamelCase_)
# Initialize the parser
_A = Parser(UpperCamelCase_)
try:
# Open URL
_A = requests.get(UpperCamelCase_)
# pass the raw HTML to the parser to get links
parser.feed(r.text)
# Get links and loop through
_A = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
_A = requests.get(UpperCamelCase_)
# Get the valid email.
_A = re.findall("""[a-zA-Z0-9]+@""" + domain , read.text)
# If not in list then append it.
for email in emails:
valid_emails.add(UpperCamelCase_)
except ValueError:
pass
except ValueError:
raise SystemExit(1)
# Finally return a sorted list of email addresses with no duplicates.
return sorted(UpperCamelCase_)
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = emails_from_url('https://github.com')
print(F'''{len(emails)} emails found:''')
print('\n'.join(sorted(emails)))
| 401 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
# General docstring
UpperCAmelCase__ : int = "RegNetConfig"
# Base docstring
UpperCAmelCase__ : Optional[int] = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[int] = [1, 10_88, 7, 7]
# Image classification docstring
UpperCAmelCase__ : Tuple = "facebook/regnet-y-040"
UpperCAmelCase__ : Optional[Any] = "tabby, tabby cat"
UpperCAmelCase__ : int = [
"facebook/regnet-y-040",
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class A ( tf.keras.layers.Layer ):
def __init__( self : str , __magic_name__ : int , __magic_name__ : int = 3 , __magic_name__ : int = 1 , __magic_name__ : int = 1 , __magic_name__ : Optional[str] = "relu" , **__magic_name__ : int , ):
"""simple docstring"""
super().__init__(**__magic_name__ )
# The padding and conv has been verified in
# https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
lowerCAmelCase__ = tf.keras.layers.ZeroPaddingaD(padding=kernel_size // 2 )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=__magic_name__ , strides=__magic_name__ , padding="VALID" , groups=__magic_name__ , use_bias=__magic_name__ , name="convolution" , )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
lowerCAmelCase__ = ACTaFN[activation] if activation is not None else tf.identity
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = self.convolution(self.padding(__magic_name__ ) )
lowerCAmelCase__ = self.normalization(__magic_name__ )
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : List[Any] , __magic_name__ : RegNetConfig , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config.num_channels
lowerCAmelCase__ = TFRegNetConvLayer(
out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name="embedder" , )
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = shape_list(__magic_name__ )[1]
if tf.executing_eagerly() and num_channels != self.num_channels:
raise ValueError(
"Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
# When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
# So change the input format from `NCHW` to `NHWC`.
# shape = (batch_size, in_height, in_width, in_channels=num_channels)
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 2, 3, 1) )
lowerCAmelCase__ = self.embedder(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Any , __magic_name__ : int , __magic_name__ : int = 2 , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.ConvaD(
filters=__magic_name__ , kernel_size=1 , strides=__magic_name__ , use_bias=__magic_name__ , name="convolution" )
lowerCAmelCase__ = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name="normalization" )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : bool = False ):
"""simple docstring"""
return self.normalization(self.convolution(__magic_name__ ) , training=__magic_name__ )
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , **__magic_name__ : List[Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
lowerCAmelCase__ = [
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="relu" , name="attention.0" ),
tf.keras.layers.ConvaD(filters=__magic_name__ , kernel_size=1 , activation="sigmoid" , name="attention.2" ),
]
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.pooler(__magic_name__ )
for layer_module in self.attention:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = hidden_state * pooled
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
# `self.layers` instead of `self.layer` because that is a reserved argument.
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.2" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : int , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 1 , **__magic_name__ : str ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = in_channels != out_channels or stride != 1
lowerCAmelCase__ = max(1 , out_channels // config.groups_width )
lowerCAmelCase__ = (
TFRegNetShortCut(__magic_name__ , stride=__magic_name__ , name="shortcut" )
if should_apply_shortcut
else tf.keras.layers.Activation("linear" , name="shortcut" )
)
lowerCAmelCase__ = [
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=config.hidden_act , name="layer.0" ),
TFRegNetConvLayer(
__magic_name__ , stride=__magic_name__ , groups=__magic_name__ , activation=config.hidden_act , name="layer.1" ),
TFRegNetSELayer(__magic_name__ , reduced_channels=int(round(in_channels / 4 ) ) , name="layer.2" ),
TFRegNetConvLayer(__magic_name__ , kernel_size=1 , activation=__magic_name__ , name="layer.3" ),
]
lowerCAmelCase__ = ACTaFN[config.hidden_act]
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Any ):
"""simple docstring"""
lowerCAmelCase__ = hidden_state
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
lowerCAmelCase__ = self.shortcut(__magic_name__ )
hidden_state += residual
lowerCAmelCase__ = self.activation(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Union[str, Any] , __magic_name__ : RegNetConfig , __magic_name__ : int , __magic_name__ : int , __magic_name__ : int = 2 , __magic_name__ : int = 2 , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
lowerCAmelCase__ = [
# downsampling is done in the first layer with stride of 2
layer(__magic_name__ , __magic_name__ , __magic_name__ , stride=__magic_name__ , name="layers.0" ),
*[layer(__magic_name__ , __magic_name__ , __magic_name__ , name=f"""layers.{i+1}""" ) for i in range(depth - 1 )],
]
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[str] ):
"""simple docstring"""
for layer_module in self.layers:
lowerCAmelCase__ = layer_module(__magic_name__ )
return hidden_state
class A ( tf.keras.layers.Layer ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = []
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
TFRegNetStage(
__magic_name__ , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name="stages.0" , ) )
lowerCAmelCase__ = zip(config.hidden_sizes , config.hidden_sizes[1:] )
for i, ((in_channels, out_channels), depth) in enumerate(zip(__magic_name__ , config.depths[1:] ) ):
self.stages.append(TFRegNetStage(__magic_name__ , __magic_name__ , __magic_name__ , depth=__magic_name__ , name=f"""stages.{i+1}""" ) )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : tf.Tensor , __magic_name__ : bool = False , __magic_name__ : bool = True ):
"""simple docstring"""
lowerCAmelCase__ = () if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
lowerCAmelCase__ = stage_module(__magic_name__ )
if output_hidden_states:
lowerCAmelCase__ = hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return TFBaseModelOutputWithNoAttention(last_hidden_state=__magic_name__ , hidden_states=__magic_name__ )
@keras_serializable
class A ( tf.keras.layers.Layer ):
snake_case__ :List[Any] = RegNetConfig
def __init__( self : str , __magic_name__ : Union[str, Any] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(**__magic_name__ )
lowerCAmelCase__ = config
lowerCAmelCase__ = TFRegNetEmbeddings(__magic_name__ , name="embedder" )
lowerCAmelCase__ = TFRegNetEncoder(__magic_name__ , name="encoder" )
lowerCAmelCase__ = tf.keras.layers.GlobalAveragePoolingaD(keepdims=__magic_name__ , name="pooler" )
@unpack_inputs
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.embedder(__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = self.encoder(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = encoder_outputs[0]
lowerCAmelCase__ = self.pooler(__magic_name__ )
# Change to NCHW output format have uniformity in the modules
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
lowerCAmelCase__ = tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) )
# Change the other hidden state outputs to NCHW as well
if output_hidden_states:
lowerCAmelCase__ = tuple([tf.transpose(__magic_name__ , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=__magic_name__ , pooler_output=__magic_name__ , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :str = RegNetConfig
snake_case__ :Optional[Any] = 'regnet'
snake_case__ :Tuple = 'pixel_values'
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.floataa )}
UpperCAmelCase__ : List[str] = R"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCAmelCase__ : Tuple = R"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'The bare RegNet model outputting raw features without any specific head on top.' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : Any , __magic_name__ : RegNetConfig , *__magic_name__ : Optional[int] , **__magic_name__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : tf.Tensor , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : int=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
pixel_values=__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ , )
if not return_dict:
return (outputs[0],) + outputs[1:]
return TFBaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
'\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n ' , SCREAMING_SNAKE_CASE__ , )
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
def __init__( self : Tuple , __magic_name__ : RegNetConfig , *__magic_name__ : Tuple , **__magic_name__ : Optional[int] ):
"""simple docstring"""
super().__init__(__magic_name__ , *__magic_name__ , **__magic_name__ )
lowerCAmelCase__ = config.num_labels
lowerCAmelCase__ = TFRegNetMainLayer(__magic_name__ , name="regnet" )
# classification head
lowerCAmelCase__ = [
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(config.num_labels , name="classifier.1" ) if config.num_labels > 0 else tf.identity,
]
@unpack_inputs
@add_start_docstrings_to_model_forward(__magic_name__ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=__magic_name__ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : tf.Tensor = None , __magic_name__ : tf.Tensor = None , __magic_name__ : bool = None , __magic_name__ : bool = None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
lowerCAmelCase__ = return_dict if return_dict is not None else self.config.use_return_dict
lowerCAmelCase__ = self.regnet(
__magic_name__ , output_hidden_states=__magic_name__ , return_dict=__magic_name__ , training=__magic_name__ )
lowerCAmelCase__ = outputs.pooler_output if return_dict else outputs[1]
lowerCAmelCase__ = self.classifier[0](__magic_name__ )
lowerCAmelCase__ = self.classifier[1](__magic_name__ )
lowerCAmelCase__ = None if labels is None else self.hf_compute_loss(labels=__magic_name__ , logits=__magic_name__ )
if not return_dict:
lowerCAmelCase__ = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(loss=__magic_name__ , logits=__magic_name__ , hidden_states=outputs.hidden_states )
| 48 | 0 |
'''simple docstring'''
__snake_case = 8.314462 # Unit - J mol-1 K-1
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->float:
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ->float:
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError("""Invalid inputs. Enter positive value.""" )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
| 451 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def A ( UpperCamelCase_ : Tuple ) -> int:
'''simple docstring'''
for param in module.parameters():
lowerCAmelCase__ = False
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = "cuda" if torch.cuda.is_available() else "cpu"
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
lowerCAmelCase__ = "mps"
if device == "mps":
print(
"WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
" errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
" with generations." )
return device
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowerCAmelCase__ = plt.imshow(UpperCamelCase_ )
fig.axes.get_xaxis().set_visible(UpperCamelCase_ )
fig.axes.get_yaxis().set_visible(UpperCamelCase_ )
plt.show()
def A ( ) -> Union[str, Any]:
'''simple docstring'''
lowerCAmelCase__ = datetime.now()
lowerCAmelCase__ = current_time.strftime("%H:%M:%S" )
return timestamp
| 48 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class a__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowercase , lowercase=7 , lowercase=3 , lowercase=18 , lowercase=30 , lowercase=400 , lowercase=True , lowercase=None , lowercase=True , ) -> int:
'''simple docstring'''
A__ = size if size is not None else {"height": 18, "width": 18}
A__ = parent
A__ = batch_size
A__ = num_channels
A__ = image_size
A__ = min_resolution
A__ = max_resolution
A__ = do_resize
A__ = size
A__ = apply_ocr
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class a__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCamelCase = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = LayoutLMvaImageProcessingTester(self )
@property
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self ) -> str:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase , "do_resize" ) )
self.assertTrue(hasattr(lowercase , "size" ) )
self.assertTrue(hasattr(lowercase , "apply_ocr" ) )
def UpperCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
A__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
A__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def UpperCamelCase ( self ) -> List[Any]:
'''simple docstring'''
pass
def UpperCamelCase ( self ) -> Union[str, Any]:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , Image.Image )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
self.assertIsInstance(encoding.words , lowercase )
self.assertIsInstance(encoding.boxes , lowercase )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def UpperCamelCase ( self ) -> int:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , numpify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , np.ndarray )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def UpperCamelCase ( self ) -> Any:
'''simple docstring'''
A__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase , torchify=lowercase )
for image in image_inputs:
self.assertIsInstance(lowercase , torch.Tensor )
# Test not batched input
A__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
# Test batched
A__ = image_processing(lowercase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["height"],
self.image_processor_tester.size["width"],
) , )
def UpperCamelCase ( self ) -> Tuple:
'''simple docstring'''
A__ = LayoutLMvaImageProcessor()
from datasets import load_dataset
A__ = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
A__ = Image.open(ds[0]["file"] ).convert("RGB" )
A__ = image_processing(lowercase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
A__ = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
A__ = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , lowercase )
self.assertListEqual(encoding.boxes , lowercase )
# with apply_OCR = False
A__ = LayoutLMvaImageProcessor(apply_ocr=lowercase )
A__ = image_processing(lowercase , return_tensors="pt" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 514 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase__ : List[Any] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Union[str, Any] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[int] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ : Optional[Any] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCAmelCase__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 | 0 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
UpperCAmelCase__ = ["gpt2"]
UpperCAmelCase__ = "gpt2"
if is_tf_available():
class a__ ( tf.Module ):
'''simple docstring'''
def __init__( self : List[Any] , lowerCAmelCase_ : List[Any] ) -> Union[str, Any]:
super().__init__()
__A= tokenizer
__A= AutoConfig.from_pretrained(lowerCAmelCase_ )
__A= TFGPTaLMHeadModel.from_config(lowerCAmelCase_ )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) )
def lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : List[str] ) -> List[Any]:
__A= self.tokenizer(lowerCAmelCase_ )
__A= tokenized['input_ids'].to_tensor()
__A= tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__A= self.model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )['logits']
return outputs
@require_tf
@require_keras_nlp
class a__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase ( self : str ) -> int:
super().setUp()
__A= [GPTaTokenizer.from_pretrained(lowerCAmelCase_ ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__A= [TFGPTaTokenizer.from_pretrained(lowerCAmelCase_ ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__A= [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
__A= list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def lowerCAmelCase ( self : Any ) -> Union[str, Any]:
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__A= tokenizer([test_inputs] , return_tensors='tf' )
__A= tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__A= python_outputs[key].numpy()
__A= tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(lowerCAmelCase_ , tf.intaa ) == tf_outputs_values ) )
@slow
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
for tf_tokenizer in self.tf_tokenizers:
__A= tf.function(lowerCAmelCase_ )
for test_inputs in self.test_sentences:
__A= tf.constant(lowerCAmelCase_ )
__A= compiled_tokenizer(lowerCAmelCase_ )
__A= tf_tokenizer(lowerCAmelCase_ )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def lowerCAmelCase ( self : str ) -> List[Any]:
for tf_tokenizer in self.tf_tokenizers:
__A= ModelToSave(tokenizer=lowerCAmelCase_ )
__A= tf.convert_to_tensor([self.test_sentences[0]] )
__A= model.serving(lowerCAmelCase_ ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__A= Path(lowerCAmelCase_ ) / 'saved.model'
tf.saved_model.save(lowerCAmelCase_ , lowerCAmelCase_ , signatures={'serving_default': model.serving} )
__A= tf.saved_model.load(lowerCAmelCase_ )
__A= loaded_model.signatures['serving_default'](lowerCAmelCase_ )['output_0']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def lowerCAmelCase ( self : Any ) -> List[str]:
for tf_tokenizer in self.tf_tokenizers:
__A= tf.convert_to_tensor([self.test_sentences[0]] )
__A= tf_tokenizer(lowerCAmelCase_ ) # Build model with some sample inputs
__A= tf_tokenizer.get_config()
__A= TFGPTaTokenizer.from_config(lowerCAmelCase_ )
__A= model_from_config(lowerCAmelCase_ )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def lowerCAmelCase ( self : Any ) -> Dict:
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__A= 123_123
for max_length in [3, 5, 1_024]:
__A= tf.convert_to_tensor([self.test_sentences[0]] )
__A= tf_tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ )
__A= out['input_ids'].numpy().shape[1]
assert out_length == max_length
| 186 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def A ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict , UpperCamelCase_ : Dict , UpperCamelCase_ : int ) -> Any:
'''simple docstring'''
lowerCAmelCase__ = BigBirdConfig.from_json_file(UpperCamelCase_ )
print(F"""Building PyTorch model from configuration: {config}""" )
if is_trivia_qa:
lowerCAmelCase__ = BigBirdForQuestionAnswering(UpperCamelCase_ )
else:
lowerCAmelCase__ = BigBirdForPreTraining(UpperCamelCase_ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(UpperCamelCase_ , UpperCamelCase_ , is_trivia_qa=UpperCamelCase_ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
UpperCAmelCase__ : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
| 48 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=SCREAMING_SNAKE_CASE__ )
class _lowerCAmelCase ( SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE : str = field(default='automatic-speech-recognition' , metadata={'include_in_asdict_even_if_is_default': True} )
__SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({'audio': Audio()} )
__SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({'transcription': Value('string' )} )
__SCREAMING_SNAKE_CASE : str = "audio"
__SCREAMING_SNAKE_CASE : str = "transcription"
def _a (self , lowercase ):
if self.audio_column not in features:
raise ValueError(F'Column {self.audio_column} is not present in features.' )
if not isinstance(features[self.audio_column] , lowercase ):
raise ValueError(F'Column {self.audio_column} is not an Audio type.' )
A_ : Dict = copy.deepcopy(self )
A_ : Dict = self.input_schema.copy()
A_ : Optional[int] = features[self.audio_column]
A_ : Optional[int] = input_schema
return task_template
@property
def _a (self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
| 667 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class A :
def __init__( self : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : str=13 , __magic_name__ : List[str]=7 , __magic_name__ : Tuple=True , __magic_name__ : Tuple=True , __magic_name__ : str=True , __magic_name__ : int=True , __magic_name__ : int=99 , __magic_name__ : List[str]=[1, 1, 2] , __magic_name__ : Dict=1 , __magic_name__ : Tuple=32 , __magic_name__ : Any=4 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[Any]=37 , __magic_name__ : Tuple="gelu_new" , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : List[str]=0.1 , __magic_name__ : Tuple=0.0 , __magic_name__ : int=512 , __magic_name__ : Optional[int]=3 , __magic_name__ : List[str]=0.02 , __magic_name__ : Dict=3 , __magic_name__ : List[Any]=4 , __magic_name__ : Any=None , __magic_name__ : Dict=False , ):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_token_type_ids
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = block_sizes
lowerCAmelCase__ = num_decoder_layers
lowerCAmelCase__ = d_model
lowerCAmelCase__ = n_head
lowerCAmelCase__ = d_head
lowerCAmelCase__ = d_inner
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = hidden_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = type_vocab_size
lowerCAmelCase__ = 2
lowerCAmelCase__ = num_labels
lowerCAmelCase__ = num_choices
lowerCAmelCase__ = scope
lowerCAmelCase__ = initializer_std
# Used in the tests to check the size of the first attention layer
lowerCAmelCase__ = n_head
# Used in the tests to check the size of the first hidden state
lowerCAmelCase__ = self.d_model
# Used in the tests to check the number of output hidden states/attentions
lowerCAmelCase__ = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
lowerCAmelCase__ = self.num_hidden_layers + 2
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = None
if self.use_token_type_ids:
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Optional[int] , __magic_name__ : Optional[int] , __magic_name__ : Union[str, Any] , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str , __magic_name__ : Any , __magic_name__ : List[Any] , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : int , __magic_name__ : int , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = [input_ids, input_mask]
lowerCAmelCase__ = model(__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
lowerCAmelCase__ = False
lowerCAmelCase__ = TFFunnelBaseModel(config=__magic_name__ )
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : Dict , __magic_name__ : List[Any] , __magic_name__ : str , __magic_name__ : Optional[Any] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForPreTraining(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : int , __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , __magic_name__ : Dict , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForMaskedLM(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : List[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Tuple , __magic_name__ : List[Any] , __magic_name__ : List[Any] , __magic_name__ : Any , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForSequenceClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] , __magic_name__ : Any , __magic_name__ : Any , __magic_name__ : List[str] , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_choices
lowerCAmelCase__ = TFFunnelForMultipleChoice(config=__magic_name__ )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = tf.tile(tf.expand_dims(__magic_name__ , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase__ = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : Dict , __magic_name__ : Any , __magic_name__ : Union[str, Any] , __magic_name__ : int , __magic_name__ : int , __magic_name__ : Optional[int] , __magic_name__ : str , ):
"""simple docstring"""
lowerCAmelCase__ = self.num_labels
lowerCAmelCase__ = TFFunnelForTokenClassification(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : Tuple , __magic_name__ : Optional[Any] , __magic_name__ : str , __magic_name__ : Dict , __magic_name__ : Optional[int] , __magic_name__ : str , __magic_name__ : List[str] , ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelForQuestionAnswering(config=__magic_name__ )
lowerCAmelCase__ = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCAmelCase__ = model(__magic_name__ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,(
lowerCAmelCase__
) ,
) = config_and_inputs
lowerCAmelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :int = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ :Any = (
{
'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
'fill-mask': TFFunnelForMaskedLM,
'question-answering': TFFunnelForQuestionAnswering,
'text-classification': TFFunnelForSequenceClassification,
'token-classification': TFFunnelForTokenClassification,
'zero-shot': TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ :str = False
snake_case__ :Any = False
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : int ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Any ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__magic_name__ )
@require_tf
class A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
snake_case__ :Any = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
snake_case__ :int = False
snake_case__ :List[Any] = False
def __SCREAMING_SNAKE_CASE ( self : str ):
"""simple docstring"""
lowerCAmelCase__ = TFFunnelModelTester(self , base=__magic_name__ )
lowerCAmelCase__ = ConfigTester(self , config_class=__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__magic_name__ )
| 48 | 0 |
def snake_case__ ( lowerCamelCase_ ):
A : Dict = len(UpperCamelCase_ )
for i in range(UpperCamelCase_ ):
for j in range(i + 1 , UpperCamelCase_ ):
if numbers[j] < numbers[i]:
A , A : Any = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowercase : Dict = input("Enter numbers separated by a comma:\n").strip()
lowercase : str = [int(item) for item in user_input.split(",")]
print(exchange_sort(unsorted))
| 542 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCAmelCase__ : Tuple = logging.get_logger(__name__)
UpperCAmelCase__ : List[str] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class A ( SCREAMING_SNAKE_CASE__ ):
snake_case__ :Union[str, Any] = 'umt5'
snake_case__ :Any = ['past_key_values']
def __init__( self : List[Any] , __magic_name__ : Tuple=250112 , __magic_name__ : str=512 , __magic_name__ : int=64 , __magic_name__ : str=1024 , __magic_name__ : Tuple=8 , __magic_name__ : Optional[int]=None , __magic_name__ : Optional[Any]=6 , __magic_name__ : Dict=32 , __magic_name__ : Optional[Any]=128 , __magic_name__ : Union[str, Any]=0.1 , __magic_name__ : int=1E-6 , __magic_name__ : Optional[int]=1.0 , __magic_name__ : Dict="gated-gelu" , __magic_name__ : List[str]=True , __magic_name__ : Tuple=True , __magic_name__ : Optional[int]="T5Tokenizer" , __magic_name__ : str=True , __magic_name__ : int=0 , __magic_name__ : Union[str, Any]=1 , __magic_name__ : str=0 , **__magic_name__ : Any , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=__magic_name__ , tokenizer_class=__magic_name__ , tie_word_embeddings=__magic_name__ , pad_token_id=__magic_name__ , eos_token_id=__magic_name__ , decoder_start_token_id=__magic_name__ , **__magic_name__ , )
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = d_model
lowerCAmelCase__ = d_kv
lowerCAmelCase__ = d_ff
lowerCAmelCase__ = num_layers
lowerCAmelCase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = relative_attention_num_buckets
lowerCAmelCase__ = relative_attention_max_distance
lowerCAmelCase__ = dropout_rate
lowerCAmelCase__ = layer_norm_epsilon
lowerCAmelCase__ = initializer_factor
lowerCAmelCase__ = feed_forward_proj
lowerCAmelCase__ = use_cache
lowerCAmelCase__ = self.feed_forward_proj.split("-" )
lowerCAmelCase__ = act_info[-1]
lowerCAmelCase__ = act_info[0] == "gated"
if len(__magic_name__ ) > 1 and act_info[0] != "gated" or len(__magic_name__ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
if feed_forward_proj == "gated-gelu":
lowerCAmelCase__ = "gelu_new"
@property
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return self.d_model
@property
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
"""simple docstring"""
return self.num_heads
@property
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
return self.num_layers
class A ( SCREAMING_SNAKE_CASE__ ):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __SCREAMING_SNAKE_CASE ( self : Dict ):
"""simple docstring"""
lowerCAmelCase__ = {
"input_ids": {0: "batch", 1: "encoder_sequence"},
"attention_mask": {0: "batch", 1: "encoder_sequence"},
}
if self.use_past:
lowerCAmelCase__ = "past_encoder_sequence + sequence"
lowerCAmelCase__ = {0: "batch"}
lowerCAmelCase__ = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
lowerCAmelCase__ = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__magic_name__ , direction="inputs" )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 13
@property
def __SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return 5E-4
| 48 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class lowercase_ :
"""simple docstring"""
__lowerCAmelCase = BlenderbotSmallConfig
__lowerCAmelCase = {}
__lowerCAmelCase = 'gelu'
def __init__( self : Any, UpperCamelCase__ : int, UpperCamelCase__ : int=13, UpperCamelCase__ : int=7, UpperCamelCase__ : Any=True, UpperCamelCase__ : Tuple=False, UpperCamelCase__ : Union[str, Any]=99, UpperCamelCase__ : Union[str, Any]=32, UpperCamelCase__ : Tuple=2, UpperCamelCase__ : List[str]=4, UpperCamelCase__ : List[Any]=37, UpperCamelCase__ : Optional[Any]=0.1, UpperCamelCase__ : Optional[Any]=0.1, UpperCamelCase__ : Union[str, Any]=20, UpperCamelCase__ : Tuple=2, UpperCamelCase__ : List[str]=1, UpperCamelCase__ : Union[str, Any]=0, ) -> List[str]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = eos_token_id
_A = pad_token_id
_A = bos_token_id
def __UpperCAmelCase ( self : List[Any] ) -> int:
_A = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
_A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
_A = tf.concat([input_ids, eos_tensor], axis=1 )
_A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
_A = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
_A = prepare_blenderbot_small_inputs_dict(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
return config, inputs_dict
def __UpperCAmelCase ( self : Optional[int], UpperCamelCase__ : Dict, UpperCamelCase__ : str ) -> Union[str, Any]:
_A = TFBlenderbotSmallModel(config=UpperCamelCase__ ).get_decoder()
_A = inputs_dict['input_ids']
_A = input_ids[:1, :]
_A = inputs_dict['attention_mask'][:1, :]
_A = inputs_dict['head_mask']
_A = 1
# first forward pass
_A = model(UpperCamelCase__, attention_mask=UpperCamelCase__, head_mask=UpperCamelCase__, use_cache=UpperCamelCase__ )
_A , _A = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_A = ids_tensor((self.batch_size, 3), config.vocab_size )
_A = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and
_A = tf.concat([input_ids, next_tokens], axis=-1 )
_A = tf.concat([attention_mask, next_attn_mask], axis=-1 )
_A = model(UpperCamelCase__, attention_mask=UpperCamelCase__ )[0]
_A = model(UpperCamelCase__, attention_mask=UpperCamelCase__, past_key_values=UpperCamelCase__ )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
_A = int(ids_tensor((1,), output_from_past.shape[-1] ) )
_A = output_from_no_past[:, -3:, random_slice_idx]
_A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(UpperCamelCase__, UpperCamelCase__, rtol=1e-3 )
def _SCREAMING_SNAKE_CASE ( __snake_case : Optional[Any] , __snake_case : Union[str, Any] , __snake_case : List[str] , __snake_case : str=None , __snake_case : List[str]=None , __snake_case : Dict=None , __snake_case : List[str]=None , __snake_case : List[str]=None , ):
if attention_mask is None:
_A = tf.cast(tf.math.not_equal(UpperCamelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_A = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_A = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowercase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
__lowerCAmelCase = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
__lowerCAmelCase = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
__lowerCAmelCase = True
__lowerCAmelCase = False
__lowerCAmelCase = False
def __UpperCAmelCase ( self : Dict ) -> int:
_A = TFBlenderbotSmallModelTester(self )
_A = ConfigTester(self, config_class=UpperCamelCase__ )
def __UpperCAmelCase ( self : Tuple ) -> Dict:
self.config_tester.run_common_tests()
def __UpperCAmelCase ( self : Union[str, Any] ) -> Any:
_A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*UpperCamelCase__ )
@require_tokenizers
@require_tf
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
__lowerCAmelCase = 'facebook/blenderbot_small-90M'
@cached_property
def __UpperCAmelCase ( self : List[Any] ) -> Optional[int]:
return BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
@cached_property
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
_A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
_A = self.tokenizer(self.src_text, return_tensors='tf' )
_A = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=UpperCamelCase__, )
_A = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=UpperCamelCase__ )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 107 |
'''simple docstring'''
from __future__ import annotations
from collections import Counter
from random import random
class A :
def __init__( self : Optional[int] ):
"""simple docstring"""
lowerCAmelCase__ = {}
def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = {}
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str , __magic_name__ : str , __magic_name__ : float ):
"""simple docstring"""
if nodea not in self.connections:
self.add_node(__magic_name__ )
if nodea not in self.connections:
self.add_node(__magic_name__ )
lowerCAmelCase__ = probability
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
return list(self.connections )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
lowerCAmelCase__ = 0
lowerCAmelCase__ = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def A ( UpperCamelCase_ : str , UpperCamelCase_ : list[tuple[str, str, float]] , UpperCamelCase_ : int ) -> dict[str, int]:
'''simple docstring'''
lowerCAmelCase__ = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase__ = Counter(graph.get_nodes() )
lowerCAmelCase__ = start
for _ in range(UpperCamelCase_ ):
lowerCAmelCase__ = graph.transition(UpperCamelCase_ )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
| 48 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
_A : Tuple = datasets.load_iris()
_A : Any = np.array(data['data'])
_A : Union[str, Any] = np.array(data['target'])
_A : List[str] = data["target_names"]
_A : List[str] = train_test_split(X, y)
def _a ( UpperCAmelCase , UpperCAmelCase ) -> str:
"""simple docstring"""
return np.linalg.norm(np.array(UpperCamelCase_ ) - np.array(UpperCamelCase_ ) )
def _a ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=5 ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = zip(UpperCamelCase_ , UpperCamelCase_ )
# List of distances of all points from the point to be classified
lowerCamelCase__ : Dict = []
for data_point in data:
lowerCamelCase__ : Union[str, Any] = euclidean_distance(data_point[0] , UpperCamelCase_ )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
lowerCamelCase__ : str = [i[1] for i in sorted(UpperCamelCase_ )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
lowerCamelCase__ : Tuple = Counter(UpperCamelCase_ ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 315 |
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
UpperCAmelCase__ : Optional[Any] = pytest.mark.integration
UpperCAmelCase__ : str = {"comet"}
UpperCAmelCase__ : Optional[Any] = importlib.util.find_spec("fairseq") is not None
UpperCAmelCase__ : Optional[int] = {"code_eval"}
UpperCAmelCase__ : List[Any] = os.name == "nt"
UpperCAmelCase__ : Optional[int] = {"bertscore", "frugalscore", "perplexity"}
UpperCAmelCase__ : int = importlib.util.find_spec("transformers") is not None
def A ( UpperCamelCase_ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[Any] , UpperCamelCase_ : List[str] ):
if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
self.skipTest("\"test requires Fairseq\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : List[Any] ) -> str:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : int ):
if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
self.skipTest("\"test requires transformers\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( UpperCamelCase_ : Any ) -> int:
'''simple docstring'''
@wraps(UpperCamelCase_ )
def wrapper(self : Optional[int] , UpperCamelCase_ : Optional[Any] ):
if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
self.skipTest("\"test not supported on Windows\"" )
else:
test_case(self , UpperCamelCase_ )
return wrapper
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = [metric_dir.split(os.sep )[-2] for metric_dir in glob.glob("./metrics/*/" )]
return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"] # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names() )
@for_all_test_methods(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@local
class A ( parameterized.TestCase ):
snake_case__ :Union[str, Any] = {}
snake_case__ :Optional[Any] = None
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning" )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : Union[str, Any] ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
lowerCAmelCase__ = datasets.load.import_main_class(metric_module.__name__ , dataset=__magic_name__ )
# check parameters
lowerCAmelCase__ = inspect.signature(metric._compute ).parameters
self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values() ) ) # no **kwargs
# run doctest
with self.patch_intensive_calls(__magic_name__ , metric_module.__name__ ):
with self.use_local_metrics():
try:
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
except doctest.UnexpectedException as e:
raise e.exc_info[1] # raise the exception that doctest caught
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@slow
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Tuple ):
"""simple docstring"""
lowerCAmelCase__ = "[...]"
lowerCAmelCase__ = importlib.import_module(
datasets.load.metric_module_factory(os.path.join("metrics" , __magic_name__ ) ).module_path )
# run doctest
with self.use_local_metrics():
lowerCAmelCase__ = doctest.testmod(__magic_name__ , verbose=__magic_name__ , raise_on_error=__magic_name__ )
self.assertEqual(results.failed , 0 )
self.assertGreater(results.attempted , 1 )
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : str ):
"""simple docstring"""
if metric_name in self.INTENSIVE_CALLS_PATCHER:
with self.INTENSIVE_CALLS_PATCHER[metric_name](__magic_name__ ):
yield
else:
yield
@contextmanager
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
"""simple docstring"""
def load_local_metric(__magic_name__ : Union[str, Any] , *__magic_name__ : Any , **__magic_name__ : Any ):
return load_metric(os.path.join("metrics" , __magic_name__ ) , *__magic_name__ , **__magic_name__ )
with patch("datasets.load_metric" ) as mock_load_metric:
lowerCAmelCase__ = load_local_metric
yield
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Any , __magic_name__ : Optional[int] ):
"""simple docstring"""
def wrapper(__magic_name__ : Dict ):
lowerCAmelCase__ = contextmanager(__magic_name__ )
lowerCAmelCase__ = patcher
return patcher
return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt" )
def A ( UpperCamelCase_ : str ) -> Any:
'''simple docstring'''
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("sv" , "" , "" ) # handle pytest cli flags
class A ( SCREAMING_SNAKE_CASE__ ):
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] ):
"""simple docstring"""
assert len(input_dict["input_ids"] ) == 2
return np.array([1.03, 1.04] )
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("bleurt.score._create_predictor" ) as mock_create_predictor:
lowerCAmelCase__ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore" )
def A ( UpperCamelCase_ : List[Any] ) -> Optional[Any]:
'''simple docstring'''
import torch
def bert_cos_score_idf(UpperCamelCase_ : List[str] , UpperCamelCase_ : List[Any] , *UpperCamelCase_ : Union[str, Any] , **UpperCamelCase_ : List[str] ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(UpperCamelCase_ ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("bert_score.scorer.get_model" ), patch(
"bert_score.scorer.bert_cos_score_idf" ) as mock_bert_cos_score_idf:
lowerCAmelCase__ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("comet" )
def A ( UpperCamelCase_ : Optional[int] ) -> Any:
'''simple docstring'''
def load_from_checkpoint(UpperCamelCase_ : Tuple ):
class A :
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , *__magic_name__ : int , **__magic_name__ : Dict ):
"""simple docstring"""
assert len(__magic_name__ ) == 2
lowerCAmelCase__ = [0.19, 0.92]
return scores, sum(__magic_name__ ) / len(__magic_name__ )
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("comet.download_model" ) as mock_download_model:
lowerCAmelCase__ = None
with patch("comet.load_from_checkpoint" ) as mock_load_from_checkpoint:
lowerCAmelCase__ = load_from_checkpoint
yield
def A ( ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = load_metric(os.path.join("metrics" , "seqeval" ) )
lowerCAmelCase__ = "ERROR"
lowerCAmelCase__ = F"""Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"""
with pytest.raises(UpperCamelCase_ , match=re.escape(UpperCamelCase_ ) ):
metric.compute(predictions=[] , references=[] , scheme=UpperCamelCase_ )
| 48 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.